xref: /openbmc/qemu/linux-user/syscall.c (revision ee135aa0)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
165 
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168  * once. This exercises the codepaths for restart.
169  */
170 //#define DEBUG_ERESTARTSYS
171 
172 //#include <linux/msdos_fs.h>
173 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
174 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
175 
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
183 
184 #define _syscall0(type,name)		\
185 static type name (void)			\
186 {					\
187 	return syscall(__NR_##name);	\
188 }
189 
190 #define _syscall1(type,name,type1,arg1)		\
191 static type name (type1 arg1)			\
192 {						\
193 	return syscall(__NR_##name, arg1);	\
194 }
195 
196 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
197 static type name (type1 arg1,type2 arg2)		\
198 {							\
199 	return syscall(__NR_##name, arg1, arg2);	\
200 }
201 
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
203 static type name (type1 arg1,type2 arg2,type3 arg3)		\
204 {								\
205 	return syscall(__NR_##name, arg1, arg2, arg3);		\
206 }
207 
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
210 {										\
211 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
212 }
213 
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
215 		  type5,arg5)							\
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
217 {										\
218 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
219 }
220 
221 
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
223 		  type5,arg5,type6,arg6)					\
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
225                   type6 arg6)							\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
228 }
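/*
 * Rough sketch of what the _syscallN() macros above generate: each one
 * defines a thin static wrapper that invokes the host system call directly
 * via syscall(2), bypassing any glibc wrapper.  For illustration, a
 * declaration such as
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count)
 *
 * expands roughly to
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * which is why the __NR_sys_* aliases are defined just below.
 */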
229 
230 
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
247 
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
252 
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257    errno. */
258 static int gettid(void) {
259     return -ENOSYS;
260 }
261 #endif
262 
263 /* For the 64-bit guest on 32-bit host case we must emulate
264  * getdents using getdents64, because otherwise the host
265  * might hand us back more dirent records than we can fit
266  * into the guest buffer after structure format conversion.
267  * Otherwise we emulate the guest getdents using the host getdents, if available.
268  */
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
271 #endif
272 
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
275 #endif
276 #if (defined(TARGET_NR_getdents) && \
277       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
280 #endif
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
283           loff_t *, res, uint, wh);
284 #endif
285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
287           siginfo_t *, uinfo)
288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group,int,error_code)
291 #endif
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address,int *,tidptr)
294 #endif
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
297           const struct timespec *,timeout,int *,uaddr2,int,val3)
298 #endif
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
301           unsigned long *, user_mask_ptr);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
304           unsigned long *, user_mask_ptr);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
308           void *, arg);
309 _syscall2(int, capget, struct __user_cap_header_struct *, header,
310           struct __user_cap_data_struct *, data);
311 _syscall2(int, capset, struct __user_cap_header_struct *, header,
312           struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get, int, which, int, who)
315 #endif
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
318 #endif
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
321 #endif
322 
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
325           unsigned long, idx1, unsigned long, idx2)
326 #endif
327 
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
330   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
331   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
332   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
333   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
334   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
335   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
336   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
337   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
338   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
339   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
340   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
342 #if defined(O_DIRECT)
343   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
344 #endif
345 #if defined(O_NOATIME)
346   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
347 #endif
348 #if defined(O_CLOEXEC)
349   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
350 #endif
351 #if defined(O_PATH)
352   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
353 #endif
354 #if defined(O_TMPFILE)
355   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
356 #endif
357   /* Don't terminate the list prematurely on 64-bit host+guest.  */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361   { 0, 0, 0, 0 }
362 };
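/*
 * Each row above is a {target_mask, target_bits, host_mask, host_bits}
 * quadruple: a guest open() flag value whose bits under target_mask equal
 * target_bits has host_bits set in the translated value (and vice versa in
 * the reverse direction).  The conversion itself is done by the generic
 * bitmask translation helpers (target_to_host_bitmask() and its inverse)
 * used elsewhere in this file.
 */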
363 
364 enum {
365     QEMU_IFLA_BR_UNSPEC,
366     QEMU_IFLA_BR_FORWARD_DELAY,
367     QEMU_IFLA_BR_HELLO_TIME,
368     QEMU_IFLA_BR_MAX_AGE,
369     QEMU_IFLA_BR_AGEING_TIME,
370     QEMU_IFLA_BR_STP_STATE,
371     QEMU_IFLA_BR_PRIORITY,
372     QEMU_IFLA_BR_VLAN_FILTERING,
373     QEMU_IFLA_BR_VLAN_PROTOCOL,
374     QEMU_IFLA_BR_GROUP_FWD_MASK,
375     QEMU_IFLA_BR_ROOT_ID,
376     QEMU_IFLA_BR_BRIDGE_ID,
377     QEMU_IFLA_BR_ROOT_PORT,
378     QEMU_IFLA_BR_ROOT_PATH_COST,
379     QEMU_IFLA_BR_TOPOLOGY_CHANGE,
380     QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
381     QEMU_IFLA_BR_HELLO_TIMER,
382     QEMU_IFLA_BR_TCN_TIMER,
383     QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
384     QEMU_IFLA_BR_GC_TIMER,
385     QEMU_IFLA_BR_GROUP_ADDR,
386     QEMU_IFLA_BR_FDB_FLUSH,
387     QEMU_IFLA_BR_MCAST_ROUTER,
388     QEMU_IFLA_BR_MCAST_SNOOPING,
389     QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
390     QEMU_IFLA_BR_MCAST_QUERIER,
391     QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
392     QEMU_IFLA_BR_MCAST_HASH_MAX,
393     QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
394     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
395     QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
396     QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
397     QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
398     QEMU_IFLA_BR_MCAST_QUERY_INTVL,
399     QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
400     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
401     QEMU_IFLA_BR_NF_CALL_IPTABLES,
402     QEMU_IFLA_BR_NF_CALL_IP6TABLES,
403     QEMU_IFLA_BR_NF_CALL_ARPTABLES,
404     QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
405     QEMU_IFLA_BR_PAD,
406     QEMU_IFLA_BR_VLAN_STATS_ENABLED,
407     QEMU_IFLA_BR_MCAST_STATS_ENABLED,
408     QEMU_IFLA_BR_MCAST_IGMP_VERSION,
409     QEMU_IFLA_BR_MCAST_MLD_VERSION,
410     QEMU___IFLA_BR_MAX,
411 };
412 
413 enum {
414     QEMU_IFLA_UNSPEC,
415     QEMU_IFLA_ADDRESS,
416     QEMU_IFLA_BROADCAST,
417     QEMU_IFLA_IFNAME,
418     QEMU_IFLA_MTU,
419     QEMU_IFLA_LINK,
420     QEMU_IFLA_QDISC,
421     QEMU_IFLA_STATS,
422     QEMU_IFLA_COST,
423     QEMU_IFLA_PRIORITY,
424     QEMU_IFLA_MASTER,
425     QEMU_IFLA_WIRELESS,
426     QEMU_IFLA_PROTINFO,
427     QEMU_IFLA_TXQLEN,
428     QEMU_IFLA_MAP,
429     QEMU_IFLA_WEIGHT,
430     QEMU_IFLA_OPERSTATE,
431     QEMU_IFLA_LINKMODE,
432     QEMU_IFLA_LINKINFO,
433     QEMU_IFLA_NET_NS_PID,
434     QEMU_IFLA_IFALIAS,
435     QEMU_IFLA_NUM_VF,
436     QEMU_IFLA_VFINFO_LIST,
437     QEMU_IFLA_STATS64,
438     QEMU_IFLA_VF_PORTS,
439     QEMU_IFLA_PORT_SELF,
440     QEMU_IFLA_AF_SPEC,
441     QEMU_IFLA_GROUP,
442     QEMU_IFLA_NET_NS_FD,
443     QEMU_IFLA_EXT_MASK,
444     QEMU_IFLA_PROMISCUITY,
445     QEMU_IFLA_NUM_TX_QUEUES,
446     QEMU_IFLA_NUM_RX_QUEUES,
447     QEMU_IFLA_CARRIER,
448     QEMU_IFLA_PHYS_PORT_ID,
449     QEMU_IFLA_CARRIER_CHANGES,
450     QEMU_IFLA_PHYS_SWITCH_ID,
451     QEMU_IFLA_LINK_NETNSID,
452     QEMU_IFLA_PHYS_PORT_NAME,
453     QEMU_IFLA_PROTO_DOWN,
454     QEMU_IFLA_GSO_MAX_SEGS,
455     QEMU_IFLA_GSO_MAX_SIZE,
456     QEMU_IFLA_PAD,
457     QEMU_IFLA_XDP,
458     QEMU_IFLA_EVENT,
459     QEMU_IFLA_NEW_NETNSID,
460     QEMU_IFLA_IF_NETNSID,
461     QEMU_IFLA_CARRIER_UP_COUNT,
462     QEMU_IFLA_CARRIER_DOWN_COUNT,
463     QEMU_IFLA_NEW_IFINDEX,
464     QEMU___IFLA_MAX
465 };
466 
467 enum {
468     QEMU_IFLA_BRPORT_UNSPEC,
469     QEMU_IFLA_BRPORT_STATE,
470     QEMU_IFLA_BRPORT_PRIORITY,
471     QEMU_IFLA_BRPORT_COST,
472     QEMU_IFLA_BRPORT_MODE,
473     QEMU_IFLA_BRPORT_GUARD,
474     QEMU_IFLA_BRPORT_PROTECT,
475     QEMU_IFLA_BRPORT_FAST_LEAVE,
476     QEMU_IFLA_BRPORT_LEARNING,
477     QEMU_IFLA_BRPORT_UNICAST_FLOOD,
478     QEMU_IFLA_BRPORT_PROXYARP,
479     QEMU_IFLA_BRPORT_LEARNING_SYNC,
480     QEMU_IFLA_BRPORT_PROXYARP_WIFI,
481     QEMU_IFLA_BRPORT_ROOT_ID,
482     QEMU_IFLA_BRPORT_BRIDGE_ID,
483     QEMU_IFLA_BRPORT_DESIGNATED_PORT,
484     QEMU_IFLA_BRPORT_DESIGNATED_COST,
485     QEMU_IFLA_BRPORT_ID,
486     QEMU_IFLA_BRPORT_NO,
487     QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
488     QEMU_IFLA_BRPORT_CONFIG_PENDING,
489     QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
490     QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
491     QEMU_IFLA_BRPORT_HOLD_TIMER,
492     QEMU_IFLA_BRPORT_FLUSH,
493     QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
494     QEMU_IFLA_BRPORT_PAD,
495     QEMU_IFLA_BRPORT_MCAST_FLOOD,
496     QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
497     QEMU_IFLA_BRPORT_VLAN_TUNNEL,
498     QEMU_IFLA_BRPORT_BCAST_FLOOD,
499     QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
500     QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
501     QEMU___IFLA_BRPORT_MAX
502 };
503 
504 enum {
505     QEMU_IFLA_TUN_UNSPEC,
506     QEMU_IFLA_TUN_OWNER,
507     QEMU_IFLA_TUN_GROUP,
508     QEMU_IFLA_TUN_TYPE,
509     QEMU_IFLA_TUN_PI,
510     QEMU_IFLA_TUN_VNET_HDR,
511     QEMU_IFLA_TUN_PERSIST,
512     QEMU_IFLA_TUN_MULTI_QUEUE,
513     QEMU_IFLA_TUN_NUM_QUEUES,
514     QEMU_IFLA_TUN_NUM_DISABLED_QUEUES,
515     QEMU___IFLA_TUN_MAX,
516 };
517 
518 enum {
519     QEMU_IFLA_INFO_UNSPEC,
520     QEMU_IFLA_INFO_KIND,
521     QEMU_IFLA_INFO_DATA,
522     QEMU_IFLA_INFO_XSTATS,
523     QEMU_IFLA_INFO_SLAVE_KIND,
524     QEMU_IFLA_INFO_SLAVE_DATA,
525     QEMU___IFLA_INFO_MAX,
526 };
527 
528 enum {
529     QEMU_IFLA_INET_UNSPEC,
530     QEMU_IFLA_INET_CONF,
531     QEMU___IFLA_INET_MAX,
532 };
533 
534 enum {
535     QEMU_IFLA_INET6_UNSPEC,
536     QEMU_IFLA_INET6_FLAGS,
537     QEMU_IFLA_INET6_CONF,
538     QEMU_IFLA_INET6_STATS,
539     QEMU_IFLA_INET6_MCAST,
540     QEMU_IFLA_INET6_CACHEINFO,
541     QEMU_IFLA_INET6_ICMP6STATS,
542     QEMU_IFLA_INET6_TOKEN,
543     QEMU_IFLA_INET6_ADDR_GEN_MODE,
544     QEMU___IFLA_INET6_MAX
545 };
546 
547 enum {
548     QEMU_IFLA_XDP_UNSPEC,
549     QEMU_IFLA_XDP_FD,
550     QEMU_IFLA_XDP_ATTACHED,
551     QEMU_IFLA_XDP_FLAGS,
552     QEMU_IFLA_XDP_PROG_ID,
553     QEMU___IFLA_XDP_MAX,
554 };
555 
556 enum {
557     QEMU_RTA_UNSPEC,
558     QEMU_RTA_DST,
559     QEMU_RTA_SRC,
560     QEMU_RTA_IIF,
561     QEMU_RTA_OIF,
562     QEMU_RTA_GATEWAY,
563     QEMU_RTA_PRIORITY,
564     QEMU_RTA_PREFSRC,
565     QEMU_RTA_METRICS,
566     QEMU_RTA_MULTIPATH,
567     QEMU_RTA_PROTOINFO, /* no longer used */
568     QEMU_RTA_FLOW,
569     QEMU_RTA_CACHEINFO,
570     QEMU_RTA_SESSION, /* no longer used */
571     QEMU_RTA_MP_ALGO, /* no longer used */
572     QEMU_RTA_TABLE,
573     QEMU_RTA_MARK,
574     QEMU_RTA_MFC_STATS,
575     QEMU_RTA_VIA,
576     QEMU_RTA_NEWDST,
577     QEMU_RTA_PREF,
578     QEMU_RTA_ENCAP_TYPE,
579     QEMU_RTA_ENCAP,
580     QEMU_RTA_EXPIRES,
581     QEMU_RTA_PAD,
582     QEMU_RTA_UID,
583     QEMU_RTA_TTL_PROPAGATE,
584     QEMU_RTA_IP_PROTO,
585     QEMU_RTA_SPORT,
586     QEMU_RTA_DPORT,
587     QEMU___RTA_MAX
588 };
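/*
 * The QEMU_IFLA_*, QEMU_IFLA_BR*, QEMU_IFLA_BRPORT_*, QEMU_IFLA_INET*,
 * QEMU_IFLA_XDP_* and QEMU_RTA_* enums above deliberately duplicate the
 * kernel's rtnetlink attribute numbering.  Keeping private copies lets the
 * netlink translation code recognise (or at least skip over) newer
 * attributes even when QEMU is built against older host kernel headers that
 * do not define them.
 */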
589 
590 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
591 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
592 typedef struct TargetFdTrans {
593     TargetFdDataFunc host_to_target_data;
594     TargetFdDataFunc target_to_host_data;
595     TargetFdAddrFunc target_to_host_addr;
596 } TargetFdTrans;
597 
598 static TargetFdTrans **target_fd_trans;
599 
600 static unsigned int target_fd_max;
601 
602 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
603 {
604     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
605         return target_fd_trans[fd]->target_to_host_data;
606     }
607     return NULL;
608 }
609 
610 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
611 {
612     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
613         return target_fd_trans[fd]->host_to_target_data;
614     }
615     return NULL;
616 }
617 
618 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
619 {
620     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
621         return target_fd_trans[fd]->target_to_host_addr;
622     }
623     return NULL;
624 }
625 
626 static void fd_trans_register(int fd, TargetFdTrans *trans)
627 {
628     unsigned int oldmax;
629 
630     if (fd >= target_fd_max) {
631         oldmax = target_fd_max;
632         target_fd_max = ((fd >> 6) + 1) << 6; /* grow in slices of 64 entries */
633         target_fd_trans = g_renew(TargetFdTrans *,
634                                   target_fd_trans, target_fd_max);
635         memset((void *)(target_fd_trans + oldmax), 0,
636                (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
637     }
638     target_fd_trans[fd] = trans;
639 }
640 
641 static void fd_trans_unregister(int fd)
642 {
643     if (fd >= 0 && fd < target_fd_max) {
644         target_fd_trans[fd] = NULL;
645     }
646 }
647 
648 static void fd_trans_dup(int oldfd, int newfd)
649 {
650     fd_trans_unregister(newfd);
651     if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
652         fd_trans_register(newfd, target_fd_trans[oldfd]);
653     }
654 }
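/*
 * The TargetFdTrans machinery above is a small per-fd registry of data and
 * address conversion hooks.  A rough usage sketch: when a syscall creates a
 * file descriptor whose payload needs translating between host and guest
 * layouts (netlink sockets, signalfd, and similar), the handler for that
 * syscall registers a suitable TargetFdTrans with fd_trans_register();
 * read()/write()/sendmsg()-style handlers then look the hooks up via
 * fd_trans_host_to_target_data() and friends, and the close()/dup() paths
 * call fd_trans_unregister()/fd_trans_dup() to keep the table consistent.
 */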
655 
656 static int sys_getcwd1(char *buf, size_t size)
657 {
658   if (getcwd(buf, size) == NULL) {
659       /* getcwd() sets errno */
660       return (-1);
661   }
662   return strlen(buf)+1;
663 }
664 
665 #ifdef TARGET_NR_utimensat
666 #if defined(__NR_utimensat)
667 #define __NR_sys_utimensat __NR_utimensat
668 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
669           const struct timespec *,tsp,int,flags)
670 #else
671 static int sys_utimensat(int dirfd, const char *pathname,
672                          const struct timespec times[2], int flags)
673 {
674     errno = ENOSYS;
675     return -1;
676 }
677 #endif
678 #endif /* TARGET_NR_utimensat */
679 
680 #ifdef TARGET_NR_renameat2
681 #if defined(__NR_renameat2)
682 #define __NR_sys_renameat2 __NR_renameat2
683 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
684           const char *, new, unsigned int, flags)
685 #else
686 static int sys_renameat2(int oldfd, const char *old,
687                          int newfd, const char *new, int flags)
688 {
689     if (flags == 0) {
690         return renameat(oldfd, old, newfd, new);
691     }
692     errno = ENOSYS;
693     return -1;
694 }
695 #endif
696 #endif /* TARGET_NR_renameat2 */
697 
698 #ifdef CONFIG_INOTIFY
699 #include <sys/inotify.h>
700 
701 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
702 static int sys_inotify_init(void)
703 {
704   return (inotify_init());
705 }
706 #endif
707 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
708 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
709 {
710   return (inotify_add_watch(fd, pathname, mask));
711 }
712 #endif
713 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
714 static int sys_inotify_rm_watch(int fd, int32_t wd)
715 {
716   return (inotify_rm_watch(fd, wd));
717 }
718 #endif
719 #ifdef CONFIG_INOTIFY1
720 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
721 static int sys_inotify_init1(int flags)
722 {
723   return (inotify_init1(flags));
724 }
725 #endif
726 #endif
727 #else
728 /* Userspace can usually survive runtime without inotify */
729 #undef TARGET_NR_inotify_init
730 #undef TARGET_NR_inotify_init1
731 #undef TARGET_NR_inotify_add_watch
732 #undef TARGET_NR_inotify_rm_watch
733 #endif /* CONFIG_INOTIFY  */
734 
735 #if defined(TARGET_NR_prlimit64)
736 #ifndef __NR_prlimit64
737 # define __NR_prlimit64 -1
738 #endif
739 #define __NR_sys_prlimit64 __NR_prlimit64
740 /* The glibc rlimit structure may not be that used by the underlying syscall */
741 struct host_rlimit64 {
742     uint64_t rlim_cur;
743     uint64_t rlim_max;
744 };
745 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
746           const struct host_rlimit64 *, new_limit,
747           struct host_rlimit64 *, old_limit)
748 #endif
749 
750 
751 #if defined(TARGET_NR_timer_create)
752 /* Maximum of 32 active POSIX timers allowed at any one time. */
753 static timer_t g_posix_timers[32] = { 0, } ;
754 
755 static inline int next_free_host_timer(void)
756 {
757     int k ;
758     /* FIXME: Does finding the next free slot require a lock? */
759     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
760         if (g_posix_timers[k] == 0) {
761             g_posix_timers[k] = (timer_t) 1;
762             return k;
763         }
764     }
765     return -1;
766 }
767 #endif
768 
769 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
770 #ifdef TARGET_ARM
771 static inline int regpairs_aligned(void *cpu_env, int num)
772 {
773     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
774 }
775 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
776 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
777 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
778 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
779  * of registers which translates to the same as ARM/MIPS, because we start with
780  * r3 as arg1 */
781 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
782 #elif defined(TARGET_SH4)
783 /* SH4 doesn't align register pairs, except for p{read,write}64 */
784 static inline int regpairs_aligned(void *cpu_env, int num)
785 {
786     switch (num) {
787     case TARGET_NR_pread64:
788     case TARGET_NR_pwrite64:
789         return 1;
790 
791     default:
792         return 0;
793     }
794 }
795 #elif defined(TARGET_XTENSA)
796 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
797 #else
798 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
799 #endif
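/*
 * Rough example of what regpairs_aligned() decides: on ARM EABI a 64-bit
 * syscall argument must start in an even-numbered register, so for e.g.
 * pread64(fd, buf, count, offset) the offset is passed in r4/r5 with r3
 * left unused as padding.  When regpairs_aligned() returns 1, the argument
 * unpacking in do_syscall() skips that padding slot before reassembling the
 * 64-bit value from its two 32-bit halves.
 */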
800 
801 #define ERRNO_TABLE_SIZE 1200
802 
803 /* target_to_host_errno_table[] is initialized from
804  * host_to_target_errno_table[] in syscall_init(). */
805 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
806 };
807 
808 /*
809  * This list is the union of errno values overridden in asm-<arch>/errno.h
810  * minus the errnos that are not actually generic to all archs.
811  */
812 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
813     [EAGAIN]		= TARGET_EAGAIN,
814     [EIDRM]		= TARGET_EIDRM,
815     [ECHRNG]		= TARGET_ECHRNG,
816     [EL2NSYNC]		= TARGET_EL2NSYNC,
817     [EL3HLT]		= TARGET_EL3HLT,
818     [EL3RST]		= TARGET_EL3RST,
819     [ELNRNG]		= TARGET_ELNRNG,
820     [EUNATCH]		= TARGET_EUNATCH,
821     [ENOCSI]		= TARGET_ENOCSI,
822     [EL2HLT]		= TARGET_EL2HLT,
823     [EDEADLK]		= TARGET_EDEADLK,
824     [ENOLCK]		= TARGET_ENOLCK,
825     [EBADE]		= TARGET_EBADE,
826     [EBADR]		= TARGET_EBADR,
827     [EXFULL]		= TARGET_EXFULL,
828     [ENOANO]		= TARGET_ENOANO,
829     [EBADRQC]		= TARGET_EBADRQC,
830     [EBADSLT]		= TARGET_EBADSLT,
831     [EBFONT]		= TARGET_EBFONT,
832     [ENOSTR]		= TARGET_ENOSTR,
833     [ENODATA]		= TARGET_ENODATA,
834     [ETIME]		= TARGET_ETIME,
835     [ENOSR]		= TARGET_ENOSR,
836     [ENONET]		= TARGET_ENONET,
837     [ENOPKG]		= TARGET_ENOPKG,
838     [EREMOTE]		= TARGET_EREMOTE,
839     [ENOLINK]		= TARGET_ENOLINK,
840     [EADV]		= TARGET_EADV,
841     [ESRMNT]		= TARGET_ESRMNT,
842     [ECOMM]		= TARGET_ECOMM,
843     [EPROTO]		= TARGET_EPROTO,
844     [EDOTDOT]		= TARGET_EDOTDOT,
845     [EMULTIHOP]		= TARGET_EMULTIHOP,
846     [EBADMSG]		= TARGET_EBADMSG,
847     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
848     [EOVERFLOW]		= TARGET_EOVERFLOW,
849     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
850     [EBADFD]		= TARGET_EBADFD,
851     [EREMCHG]		= TARGET_EREMCHG,
852     [ELIBACC]		= TARGET_ELIBACC,
853     [ELIBBAD]		= TARGET_ELIBBAD,
854     [ELIBSCN]		= TARGET_ELIBSCN,
855     [ELIBMAX]		= TARGET_ELIBMAX,
856     [ELIBEXEC]		= TARGET_ELIBEXEC,
857     [EILSEQ]		= TARGET_EILSEQ,
858     [ENOSYS]		= TARGET_ENOSYS,
859     [ELOOP]		= TARGET_ELOOP,
860     [ERESTART]		= TARGET_ERESTART,
861     [ESTRPIPE]		= TARGET_ESTRPIPE,
862     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
863     [EUSERS]		= TARGET_EUSERS,
864     [ENOTSOCK]		= TARGET_ENOTSOCK,
865     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
866     [EMSGSIZE]		= TARGET_EMSGSIZE,
867     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
868     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
869     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
870     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
871     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
872     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
873     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
874     [EADDRINUSE]	= TARGET_EADDRINUSE,
875     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
876     [ENETDOWN]		= TARGET_ENETDOWN,
877     [ENETUNREACH]	= TARGET_ENETUNREACH,
878     [ENETRESET]		= TARGET_ENETRESET,
879     [ECONNABORTED]	= TARGET_ECONNABORTED,
880     [ECONNRESET]	= TARGET_ECONNRESET,
881     [ENOBUFS]		= TARGET_ENOBUFS,
882     [EISCONN]		= TARGET_EISCONN,
883     [ENOTCONN]		= TARGET_ENOTCONN,
884     [EUCLEAN]		= TARGET_EUCLEAN,
885     [ENOTNAM]		= TARGET_ENOTNAM,
886     [ENAVAIL]		= TARGET_ENAVAIL,
887     [EISNAM]		= TARGET_EISNAM,
888     [EREMOTEIO]		= TARGET_EREMOTEIO,
889     [EDQUOT]            = TARGET_EDQUOT,
890     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
891     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
892     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
893     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
894     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
895     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
896     [EALREADY]		= TARGET_EALREADY,
897     [EINPROGRESS]	= TARGET_EINPROGRESS,
898     [ESTALE]		= TARGET_ESTALE,
899     [ECANCELED]		= TARGET_ECANCELED,
900     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
901     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
902 #ifdef ENOKEY
903     [ENOKEY]		= TARGET_ENOKEY,
904 #endif
905 #ifdef EKEYEXPIRED
906     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
907 #endif
908 #ifdef EKEYREVOKED
909     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
910 #endif
911 #ifdef EKEYREJECTED
912     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
913 #endif
914 #ifdef EOWNERDEAD
915     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
916 #endif
917 #ifdef ENOTRECOVERABLE
918     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
919 #endif
920 #ifdef ENOMSG
921     [ENOMSG]            = TARGET_ENOMSG,
922 #endif
923 #ifdef ERFKILL
924     [ERFKILL]           = TARGET_ERFKILL,
925 #endif
926 #ifdef EHWPOISON
927     [EHWPOISON]         = TARGET_EHWPOISON,
928 #endif
929 };
930 
931 static inline int host_to_target_errno(int err)
932 {
933     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
934         host_to_target_errno_table[err]) {
935         return host_to_target_errno_table[err];
936     }
937     return err;
938 }
939 
940 static inline int target_to_host_errno(int err)
941 {
942     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
943         target_to_host_errno_table[err]) {
944         return target_to_host_errno_table[err];
945     }
946     return err;
947 }
948 
949 static inline abi_long get_errno(abi_long ret)
950 {
951     if (ret == -1)
952         return -host_to_target_errno(errno);
953     else
954         return ret;
955 }
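/*
 * Typical usage pattern (illustrative only): syscall handlers below wrap the
 * host call so that failures become negative *target* errno values, e.g.
 *
 *     ret = get_errno(openat(dirfd, path, host_flags, mode));
 *
 * Success values pass through unchanged; is_error(ret) then checks for the
 * negative-errno range before any result conversion is done.
 */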
956 
957 const char *target_strerror(int err)
958 {
959     if (err == TARGET_ERESTARTSYS) {
960         return "To be restarted";
961     }
962     if (err == TARGET_QEMU_ESIGRETURN) {
963         return "Successful exit from sigreturn";
964     }
965 
966     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
967         return NULL;
968     }
969     return strerror(target_to_host_errno(err));
970 }
971 
972 #define safe_syscall0(type, name) \
973 static type safe_##name(void) \
974 { \
975     return safe_syscall(__NR_##name); \
976 }
977 
978 #define safe_syscall1(type, name, type1, arg1) \
979 static type safe_##name(type1 arg1) \
980 { \
981     return safe_syscall(__NR_##name, arg1); \
982 }
983 
984 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
985 static type safe_##name(type1 arg1, type2 arg2) \
986 { \
987     return safe_syscall(__NR_##name, arg1, arg2); \
988 }
989 
990 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
991 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
992 { \
993     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
994 }
995 
996 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
997     type4, arg4) \
998 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
999 { \
1000     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
1001 }
1002 
1003 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
1004     type4, arg4, type5, arg5) \
1005 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1006     type5 arg5) \
1007 { \
1008     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
1009 }
1010 
1011 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
1012     type4, arg4, type5, arg5, type6, arg6) \
1013 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1014     type5 arg5, type6 arg6) \
1015 { \
1016     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
1017 }
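/*
 * The safe_syscallN() wrappers generated here route blocking host syscalls
 * through safe_syscall(), which cooperates with the signal handling code:
 * roughly, if a guest signal becomes pending just before the host syscall
 * is entered, the call is abandoned and reported as TARGET_ERESTARTSYS so
 * the main loop can deliver the signal and restart the syscall, instead of
 * the thread blocking with the signal left undelivered.
 */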
1018 
1019 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
1020 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
1021 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
1022               int, flags, mode_t, mode)
1023 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
1024               struct rusage *, rusage)
1025 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
1026               int, options, struct rusage *, rusage)
1027 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
1028 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
1029               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
1030 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
1031               struct timespec *, tsp, const sigset_t *, sigmask,
1032               size_t, sigsetsize)
1033 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
1034               int, maxevents, int, timeout, const sigset_t *, sigmask,
1035               size_t, sigsetsize)
1036 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
1037               const struct timespec *,timeout,int *,uaddr2,int,val3)
1038 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
1039 safe_syscall2(int, kill, pid_t, pid, int, sig)
1040 safe_syscall2(int, tkill, int, tid, int, sig)
1041 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
1042 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
1043 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
1044 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
1045               unsigned long, pos_l, unsigned long, pos_h)
1046 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
1047               unsigned long, pos_l, unsigned long, pos_h)
1048 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
1049               socklen_t, addrlen)
1050 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
1051               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
1052 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
1053               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
1054 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
1055 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
1056 safe_syscall2(int, flock, int, fd, int, operation)
1057 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
1058               const struct timespec *, uts, size_t, sigsetsize)
1059 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
1060               int, flags)
1061 safe_syscall2(int, nanosleep, const struct timespec *, req,
1062               struct timespec *, rem)
1063 #ifdef TARGET_NR_clock_nanosleep
1064 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
1065               const struct timespec *, req, struct timespec *, rem)
1066 #endif
1067 #ifdef __NR_msgsnd
1068 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
1069               int, flags)
1070 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
1071               long, msgtype, int, flags)
1072 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
1073               unsigned, nsops, const struct timespec *, timeout)
1074 #else
1075 /* This host kernel architecture uses a single ipc syscall; fake up
1076  * wrappers for the sub-operations to hide this implementation detail.
1077  * Annoyingly we can't include linux/ipc.h to get the constant definitions
1078  * for the call parameter because some structs in there conflict with the
1079  * sys/ipc.h ones. So we just define them here, and rely on them being
1080  * the same for all host architectures.
1081  */
1082 #define Q_SEMTIMEDOP 4
1083 #define Q_MSGSND 11
1084 #define Q_MSGRCV 12
1085 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1086 
1087 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1088               void *, ptr, long, fifth)
1089 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1090 {
1091     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1092 }
1093 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1094 {
1095     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1096 }
1097 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1098                            const struct timespec *timeout)
1099 {
1100     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1101                     (long)timeout);
1102 }
1103 #endif
1104 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1105 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1106               size_t, len, unsigned, prio, const struct timespec *, timeout)
1107 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1108               size_t, len, unsigned *, prio, const struct timespec *, timeout)
1109 #endif
1110 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1111  * "third argument might be integer or pointer or not present" behaviour of
1112  * the libc function.
1113  */
1114 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1115 /* Similarly for fcntl. Note that callers must always:
1116  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1117  *  use the flock64 struct rather than unsuffixed flock
1118  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1119  */
1120 #ifdef __NR_fcntl64
1121 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1122 #else
1123 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1124 #endif
1125 
1126 static inline int host_to_target_sock_type(int host_type)
1127 {
1128     int target_type;
1129 
1130     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1131     case SOCK_DGRAM:
1132         target_type = TARGET_SOCK_DGRAM;
1133         break;
1134     case SOCK_STREAM:
1135         target_type = TARGET_SOCK_STREAM;
1136         break;
1137     default:
1138         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1139         break;
1140     }
1141 
1142 #if defined(SOCK_CLOEXEC)
1143     if (host_type & SOCK_CLOEXEC) {
1144         target_type |= TARGET_SOCK_CLOEXEC;
1145     }
1146 #endif
1147 
1148 #if defined(SOCK_NONBLOCK)
1149     if (host_type & SOCK_NONBLOCK) {
1150         target_type |= TARGET_SOCK_NONBLOCK;
1151     }
1152 #endif
1153 
1154     return target_type;
1155 }
1156 
1157 static abi_ulong target_brk;
1158 static abi_ulong target_original_brk;
1159 static abi_ulong brk_page;
1160 
1161 void target_set_brk(abi_ulong new_brk)
1162 {
1163     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1164     brk_page = HOST_PAGE_ALIGN(target_brk);
1165 }
1166 
1167 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1168 #define DEBUGF_BRK(message, args...)
1169 
1170 /* do_brk() must return target values and target errnos. */
1171 abi_long do_brk(abi_ulong new_brk)
1172 {
1173     abi_long mapped_addr;
1174     abi_ulong new_alloc_size;
1175 
1176     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1177 
1178     if (!new_brk) {
1179         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1180         return target_brk;
1181     }
1182     if (new_brk < target_original_brk) {
1183         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1184                    target_brk);
1185         return target_brk;
1186     }
1187 
1188     /* If the new brk is less than the highest page reserved to the
1189      * target heap allocation, set it and we're almost done...  */
1190     if (new_brk <= brk_page) {
1191         /* Heap contents are initialized to zero, as for anonymous
1192          * mapped pages.  */
1193         if (new_brk > target_brk) {
1194             memset(g2h(target_brk), 0, new_brk - target_brk);
1195         }
1196 	target_brk = new_brk;
1197         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1198     	return target_brk;
1199     }
1200 
1201     /* We need to allocate more memory after the brk... Note that
1202      * we don't use MAP_FIXED because that will map over the top of
1203      * any existing mapping (like the one with the host libc or qemu
1204      * itself); instead we treat "mapped but at wrong address" as
1205      * a failure and unmap again.
1206      */
1207     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1208     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1209                                         PROT_READ|PROT_WRITE,
1210                                         MAP_ANON|MAP_PRIVATE, 0, 0));
1211 
1212     if (mapped_addr == brk_page) {
1213         /* Heap contents are initialized to zero, as for anonymous
1214          * mapped pages.  Technically the new pages are already
1215          * initialized to zero since they *are* anonymous mapped
1216          * pages, however we have to take care with the contents that
1217          * come from the remaining part of the previous page: it may
1218          * contain garbage data from previous heap usage (grown
1219          * then shrunk).  */
1220         memset(g2h(target_brk), 0, brk_page - target_brk);
1221 
1222         target_brk = new_brk;
1223         brk_page = HOST_PAGE_ALIGN(target_brk);
1224         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1225             target_brk);
1226         return target_brk;
1227     } else if (mapped_addr != -1) {
1228         /* Mapped but at wrong address, meaning there wasn't actually
1229          * enough space for this brk.
1230          */
1231         target_munmap(mapped_addr, new_alloc_size);
1232         mapped_addr = -1;
1233         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1234     }
1235     else {
1236         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1237     }
1238 
1239 #if defined(TARGET_ALPHA)
1240     /* We (partially) emulate OSF/1 on Alpha, which requires we
1241        return a proper errno, not an unchanged brk value.  */
1242     return -TARGET_ENOMEM;
1243 #endif
1244     /* For everything else, return the previous break. */
1245     return target_brk;
1246 }
1247 
1248 static inline abi_long copy_from_user_fdset(fd_set *fds,
1249                                             abi_ulong target_fds_addr,
1250                                             int n)
1251 {
1252     int i, nw, j, k;
1253     abi_ulong b, *target_fds;
1254 
1255     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1256     if (!(target_fds = lock_user(VERIFY_READ,
1257                                  target_fds_addr,
1258                                  sizeof(abi_ulong) * nw,
1259                                  1)))
1260         return -TARGET_EFAULT;
1261 
1262     FD_ZERO(fds);
1263     k = 0;
1264     for (i = 0; i < nw; i++) {
1265         /* grab the abi_ulong */
1266         __get_user(b, &target_fds[i]);
1267         for (j = 0; j < TARGET_ABI_BITS; j++) {
1268             /* check the bit inside the abi_ulong */
1269             if ((b >> j) & 1)
1270                 FD_SET(k, fds);
1271             k++;
1272         }
1273     }
1274 
1275     unlock_user(target_fds, target_fds_addr, 0);
1276 
1277     return 0;
1278 }
1279 
1280 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1281                                                  abi_ulong target_fds_addr,
1282                                                  int n)
1283 {
1284     if (target_fds_addr) {
1285         if (copy_from_user_fdset(fds, target_fds_addr, n))
1286             return -TARGET_EFAULT;
1287         *fds_ptr = fds;
1288     } else {
1289         *fds_ptr = NULL;
1290     }
1291     return 0;
1292 }
1293 
1294 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1295                                           const fd_set *fds,
1296                                           int n)
1297 {
1298     int i, nw, j, k;
1299     abi_long v;
1300     abi_ulong *target_fds;
1301 
1302     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1303     if (!(target_fds = lock_user(VERIFY_WRITE,
1304                                  target_fds_addr,
1305                                  sizeof(abi_ulong) * nw,
1306                                  0)))
1307         return -TARGET_EFAULT;
1308 
1309     k = 0;
1310     for (i = 0; i < nw; i++) {
1311         v = 0;
1312         for (j = 0; j < TARGET_ABI_BITS; j++) {
1313             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1314             k++;
1315         }
1316         __put_user(v, &target_fds[i]);
1317     }
1318 
1319     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1320 
1321     return 0;
1322 }
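/*
 * The two fd_set copy helpers above convert bit by bit rather than with a
 * memcpy because the guest fd_set is laid out as an array of abi_ulong
 * words: file descriptor (i * TARGET_ABI_BITS + j) lives in bit j of word i,
 * which need not match the host's FD_SET() layout when word size or
 * endianness differ between guest and host.
 */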
1323 
1324 #if defined(__alpha__)
1325 #define HOST_HZ 1024
1326 #else
1327 #define HOST_HZ 100
1328 #endif
1329 
1330 static inline abi_long host_to_target_clock_t(long ticks)
1331 {
1332 #if HOST_HZ == TARGET_HZ
1333     return ticks;
1334 #else
1335     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1336 #endif
1337 }
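/*
 * Example of the scaling above: on an Alpha host (HOST_HZ 1024) emulating a
 * target with TARGET_HZ 100, 2048 host ticks are reported to the guest as
 * (2048 * 100) / 1024 = 200 target ticks.
 */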
1338 
1339 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1340                                              const struct rusage *rusage)
1341 {
1342     struct target_rusage *target_rusage;
1343 
1344     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1345         return -TARGET_EFAULT;
1346     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1347     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1348     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1349     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1350     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1351     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1352     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1353     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1354     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1355     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1356     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1357     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1358     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1359     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1360     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1361     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1362     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1363     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1364     unlock_user_struct(target_rusage, target_addr, 1);
1365 
1366     return 0;
1367 }
1368 
1369 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1370 {
1371     abi_ulong target_rlim_swap;
1372     rlim_t result;
1373 
1374     target_rlim_swap = tswapal(target_rlim);
1375     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1376         return RLIM_INFINITY;
1377 
1378     result = target_rlim_swap;
1379     if (target_rlim_swap != (rlim_t)result)
1380         return RLIM_INFINITY;
1381 
1382     return result;
1383 }
1384 
1385 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1386 {
1387     abi_ulong target_rlim_swap;
1388     abi_ulong result;
1389 
1390     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1391         target_rlim_swap = TARGET_RLIM_INFINITY;
1392     else
1393         target_rlim_swap = rlim;
1394     result = tswapal(target_rlim_swap);
1395 
1396     return result;
1397 }
1398 
1399 static inline int target_to_host_resource(int code)
1400 {
1401     switch (code) {
1402     case TARGET_RLIMIT_AS:
1403         return RLIMIT_AS;
1404     case TARGET_RLIMIT_CORE:
1405         return RLIMIT_CORE;
1406     case TARGET_RLIMIT_CPU:
1407         return RLIMIT_CPU;
1408     case TARGET_RLIMIT_DATA:
1409         return RLIMIT_DATA;
1410     case TARGET_RLIMIT_FSIZE:
1411         return RLIMIT_FSIZE;
1412     case TARGET_RLIMIT_LOCKS:
1413         return RLIMIT_LOCKS;
1414     case TARGET_RLIMIT_MEMLOCK:
1415         return RLIMIT_MEMLOCK;
1416     case TARGET_RLIMIT_MSGQUEUE:
1417         return RLIMIT_MSGQUEUE;
1418     case TARGET_RLIMIT_NICE:
1419         return RLIMIT_NICE;
1420     case TARGET_RLIMIT_NOFILE:
1421         return RLIMIT_NOFILE;
1422     case TARGET_RLIMIT_NPROC:
1423         return RLIMIT_NPROC;
1424     case TARGET_RLIMIT_RSS:
1425         return RLIMIT_RSS;
1426     case TARGET_RLIMIT_RTPRIO:
1427         return RLIMIT_RTPRIO;
1428     case TARGET_RLIMIT_SIGPENDING:
1429         return RLIMIT_SIGPENDING;
1430     case TARGET_RLIMIT_STACK:
1431         return RLIMIT_STACK;
1432     default:
1433         return code;
1434     }
1435 }
1436 
1437 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1438                                               abi_ulong target_tv_addr)
1439 {
1440     struct target_timeval *target_tv;
1441 
1442     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1443         return -TARGET_EFAULT;
1444 
1445     __get_user(tv->tv_sec, &target_tv->tv_sec);
1446     __get_user(tv->tv_usec, &target_tv->tv_usec);
1447 
1448     unlock_user_struct(target_tv, target_tv_addr, 0);
1449 
1450     return 0;
1451 }
1452 
1453 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1454                                             const struct timeval *tv)
1455 {
1456     struct target_timeval *target_tv;
1457 
1458     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1459         return -TARGET_EFAULT;
1460 
1461     __put_user(tv->tv_sec, &target_tv->tv_sec);
1462     __put_user(tv->tv_usec, &target_tv->tv_usec);
1463 
1464     unlock_user_struct(target_tv, target_tv_addr, 1);
1465 
1466     return 0;
1467 }
1468 
1469 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1470                                                abi_ulong target_tz_addr)
1471 {
1472     struct target_timezone *target_tz;
1473 
1474     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1475         return -TARGET_EFAULT;
1476     }
1477 
1478     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1479     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1480 
1481     unlock_user_struct(target_tz, target_tz_addr, 0);
1482 
1483     return 0;
1484 }
1485 
1486 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1487 #include <mqueue.h>
1488 
1489 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1490                                               abi_ulong target_mq_attr_addr)
1491 {
1492     struct target_mq_attr *target_mq_attr;
1493 
1494     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1495                           target_mq_attr_addr, 1))
1496         return -TARGET_EFAULT;
1497 
1498     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1499     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1500     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1501     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1502 
1503     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1504 
1505     return 0;
1506 }
1507 
1508 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1509                                             const struct mq_attr *attr)
1510 {
1511     struct target_mq_attr *target_mq_attr;
1512 
1513     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1514                           target_mq_attr_addr, 0))
1515         return -TARGET_EFAULT;
1516 
1517     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1518     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1519     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1520     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1521 
1522     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1523 
1524     return 0;
1525 }
1526 #endif
1527 
1528 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1529 /* do_select() must return target values and target errnos. */
1530 static abi_long do_select(int n,
1531                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1532                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1533 {
1534     fd_set rfds, wfds, efds;
1535     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1536     struct timeval tv;
1537     struct timespec ts, *ts_ptr;
1538     abi_long ret;
1539 
1540     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1541     if (ret) {
1542         return ret;
1543     }
1544     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1545     if (ret) {
1546         return ret;
1547     }
1548     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1549     if (ret) {
1550         return ret;
1551     }
1552 
1553     if (target_tv_addr) {
1554         if (copy_from_user_timeval(&tv, target_tv_addr))
1555             return -TARGET_EFAULT;
1556         ts.tv_sec = tv.tv_sec;
1557         ts.tv_nsec = tv.tv_usec * 1000;
1558         ts_ptr = &ts;
1559     } else {
1560         ts_ptr = NULL;
1561     }
1562 
1563     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1564                                   ts_ptr, NULL));
1565 
1566     if (!is_error(ret)) {
1567         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1568             return -TARGET_EFAULT;
1569         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1570             return -TARGET_EFAULT;
1571         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1572             return -TARGET_EFAULT;
1573 
1574         if (target_tv_addr) {
1575             tv.tv_sec = ts.tv_sec;
1576             tv.tv_usec = ts.tv_nsec / 1000;
1577             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1578                 return -TARGET_EFAULT;
1579             }
1580         }
1581     }
1582 
1583     return ret;
1584 }
1585 
1586 #if defined(TARGET_WANT_OLD_SYS_SELECT)
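/* The old select ABI passes a single pointer to a block holding all five
 * select() arguments; unpack it and forward to do_select().
 */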
1587 static abi_long do_old_select(abi_ulong arg1)
1588 {
1589     struct target_sel_arg_struct *sel;
1590     abi_ulong inp, outp, exp, tvp;
1591     long nsel;
1592 
1593     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1594         return -TARGET_EFAULT;
1595     }
1596 
1597     nsel = tswapal(sel->n);
1598     inp = tswapal(sel->inp);
1599     outp = tswapal(sel->outp);
1600     exp = tswapal(sel->exp);
1601     tvp = tswapal(sel->tvp);
1602 
1603     unlock_user_struct(sel, arg1, 0);
1604 
1605     return do_select(nsel, inp, outp, exp, tvp);
1606 }
1607 #endif
1608 #endif
1609 
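/* Pipe creation: do_pipe2() simply fails with -ENOSYS when the host has
 * no pipe2(), and do_pipe() additionally handles the targets whose
 * original pipe syscall returns the two descriptors in registers instead
 * of through the user-supplied array.
 */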
1610 static abi_long do_pipe2(int host_pipe[], int flags)
1611 {
1612 #ifdef CONFIG_PIPE2
1613     return pipe2(host_pipe, flags);
1614 #else
1615     return -ENOSYS;
1616 #endif
1617 }
1618 
1619 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1620                         int flags, int is_pipe2)
1621 {
1622     int host_pipe[2];
1623     abi_long ret;
1624     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1625 
1626     if (is_error(ret))
1627         return get_errno(ret);
1628 
1629     /* Several targets have special calling conventions for the original
1630        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1631     if (!is_pipe2) {
1632 #if defined(TARGET_ALPHA)
1633         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_MIPS)
1636         ((CPUMIPSState *)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1637         return host_pipe[0];
1638 #elif defined(TARGET_SH4)
1639         ((CPUSH4State *)cpu_env)->gregs[1] = host_pipe[1];
1640         return host_pipe[0];
1641 #elif defined(TARGET_SPARC)
1642         ((CPUSPARCState *)cpu_env)->regwptr[1] = host_pipe[1];
1643         return host_pipe[0];
1644 #endif
1645     }
1646 
1647     if (put_user_s32(host_pipe[0], pipedes)
1648         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1649         return -TARGET_EFAULT;
1650     return get_errno(ret);
1651 }
1652 
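/* Convert a guest ip_mreq/ip_mreqn multicast membership request into the
 * host struct ip_mreqn.  The IPv4 addresses are already in network byte
 * order and are copied through unchanged; only the optional interface
 * index is byte-swapped, and only when the caller passed the larger
 * ip_mreqn layout.
 */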
1653 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1654                                               abi_ulong target_addr,
1655                                               socklen_t len)
1656 {
1657     struct target_ip_mreqn *target_smreqn;
1658 
1659     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1660     if (!target_smreqn)
1661         return -TARGET_EFAULT;
1662     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1663     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1664     if (len == sizeof(struct target_ip_mreqn))
1665         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1666     unlock_user(target_smreqn, target_addr, 0);
1667 
1668     return 0;
1669 }
1670 
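/* Copy a sockaddr from guest memory to the host.  A per-fd translator
 * (fd_trans) may take over the conversion entirely; otherwise sa_family
 * is byte-swapped, AF_UNIX paths get their length fixed up (see below),
 * and the multi-byte fields of netlink and packet addresses are swapped.
 */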
1671 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1672                                                abi_ulong target_addr,
1673                                                socklen_t len)
1674 {
1675     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1676     sa_family_t sa_family;
1677     struct target_sockaddr *target_saddr;
1678 
1679     if (fd_trans_target_to_host_addr(fd)) {
1680         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1681     }
1682 
1683     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1684     if (!target_saddr)
1685         return -TARGET_EFAULT;
1686 
1687     sa_family = tswap16(target_saddr->sa_family);
1688 
1689     /* Oops. The caller might send an incomplete sun_path; sun_path
1690      * must be terminated by \0 (see the manual page), but
1691      * unfortunately it is quite common to specify sockaddr_un
1692      * length as "strlen(x->sun_path)" while it should be
1693      * "strlen(...) + 1". We'll fix that here if needed.
1694      * The Linux kernel applies a similar fix-up.
1695      */
1696 
1697     if (sa_family == AF_UNIX) {
1698         if (len < unix_maxlen && len > 0) {
1699             char *cp = (char *)target_saddr;
1700 
1701             if (cp[len - 1] && !cp[len])
1702                 len++;
1703         }
1704         if (len > unix_maxlen)
1705             len = unix_maxlen;
1706     }
1707 
1708     memcpy(addr, target_saddr, len);
1709     addr->sa_family = sa_family;
1710     if (sa_family == AF_NETLINK) {
1711         struct sockaddr_nl *nladdr;
1712 
1713         nladdr = (struct sockaddr_nl *)addr;
1714         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1715         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1716     } else if (sa_family == AF_PACKET) {
1717         struct target_sockaddr_ll *lladdr;
1718 
1719         lladdr = (struct target_sockaddr_ll *)addr;
1720         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1721         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1722     }
1723     unlock_user(target_saddr, target_addr, 0);
1724 
1725     return 0;
1726 }
1727 
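/* The reverse direction: copy a host sockaddr out to guest memory,
 * byte-swapping sa_family and the family-specific fields (netlink pid and
 * groups, packet ifindex and hatype, IPv6 scope id) whenever enough of
 * the structure is present to touch them safely.
 */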
1728 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1729                                                struct sockaddr *addr,
1730                                                socklen_t len)
1731 {
1732     struct target_sockaddr *target_saddr;
1733 
1734     if (len == 0) {
1735         return 0;
1736     }
1737     assert(addr);
1738 
1739     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1740     if (!target_saddr)
1741         return -TARGET_EFAULT;
1742     memcpy(target_saddr, addr, len);
1743     if (len >= offsetof(struct target_sockaddr, sa_family) +
1744         sizeof(target_saddr->sa_family)) {
1745         target_saddr->sa_family = tswap16(addr->sa_family);
1746     }
1747     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1748         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1749         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1750         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1751     } else if (addr->sa_family == AF_PACKET) {
1752         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1753         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1754         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1755     } else if (addr->sa_family == AF_INET6 &&
1756                len >= sizeof(struct target_sockaddr_in6)) {
1757         struct target_sockaddr_in6 *target_in6 =
1758                (struct target_sockaddr_in6 *)target_saddr;
1759         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1760     }
1761     unlock_user(target_saddr, target_addr, len);
1762 
1763     return 0;
1764 }
1765 
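/* Convert the ancillary data (control messages) of a guest sendmsg()
 * into the host layout.  SCM_RIGHTS file descriptors and SCM_CREDENTIALS
 * are converted field by field; any other payload type is copied
 * verbatim with a warning.
 */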
1766 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1767                                            struct target_msghdr *target_msgh)
1768 {
1769     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1770     abi_long msg_controllen;
1771     abi_ulong target_cmsg_addr;
1772     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1773     socklen_t space = 0;
1774 
1775     msg_controllen = tswapal(target_msgh->msg_controllen);
1776     if (msg_controllen < sizeof (struct target_cmsghdr))
1777         goto the_end;
1778     target_cmsg_addr = tswapal(target_msgh->msg_control);
1779     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1780     target_cmsg_start = target_cmsg;
1781     if (!target_cmsg)
1782         return -TARGET_EFAULT;
1783 
1784     while (cmsg && target_cmsg) {
1785         void *data = CMSG_DATA(cmsg);
1786         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1787 
1788         int len = tswapal(target_cmsg->cmsg_len)
1789             - sizeof(struct target_cmsghdr);
1790 
1791         space += CMSG_SPACE(len);
1792         if (space > msgh->msg_controllen) {
1793             space -= CMSG_SPACE(len);
1794             /* This is a QEMU bug, since we allocated the payload
1795              * area ourselves (unlike overflow in host-to-target
1796              * conversion, which is just the guest giving us a buffer
1797              * that's too small). It can't happen for the payload types
1798              * we currently support; if it becomes an issue in future
1799              * we would need to improve our allocation strategy to
1800              * something more intelligent than "twice the size of the
1801              * target buffer we're reading from".
1802              */
1803             gemu_log("Host cmsg overflow\n");
1804             break;
1805         }
1806 
1807         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1808             cmsg->cmsg_level = SOL_SOCKET;
1809         } else {
1810             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1811         }
1812         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1813         cmsg->cmsg_len = CMSG_LEN(len);
1814 
1815         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1816             int *fd = (int *)data;
1817             int *target_fd = (int *)target_data;
1818             int i, numfds = len / sizeof(int);
1819 
1820             for (i = 0; i < numfds; i++) {
1821                 __get_user(fd[i], target_fd + i);
1822             }
1823         } else if (cmsg->cmsg_level == SOL_SOCKET
1824                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1825             struct ucred *cred = (struct ucred *)data;
1826             struct target_ucred *target_cred =
1827                 (struct target_ucred *)target_data;
1828 
1829             __get_user(cred->pid, &target_cred->pid);
1830             __get_user(cred->uid, &target_cred->uid);
1831             __get_user(cred->gid, &target_cred->gid);
1832         } else {
1833             gemu_log("Unsupported ancillary data: %d/%d\n",
1834                                         cmsg->cmsg_level, cmsg->cmsg_type);
1835             memcpy(data, target_data, len);
1836         }
1837 
1838         cmsg = CMSG_NXTHDR(msgh, cmsg);
1839         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1840                                          target_cmsg_start);
1841     }
1842     unlock_user(target_cmsg, target_cmsg_addr, 0);
1843  the_end:
1844     msgh->msg_controllen = space;
1845     return 0;
1846 }
1847 
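/* Convert received ancillary data from the host layout back into the
 * guest layout.  Payloads whose size differs between host and target
 * (e.g. SO_TIMESTAMP's struct timeval) are resized via tgt_len, and
 * truncation caused by a too-small guest buffer is reported through
 * MSG_CTRUNC, mirroring the kernel's put_cmsg() behaviour.
 */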
1848 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1849                                            struct msghdr *msgh)
1850 {
1851     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1852     abi_long msg_controllen;
1853     abi_ulong target_cmsg_addr;
1854     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1855     socklen_t space = 0;
1856 
1857     msg_controllen = tswapal(target_msgh->msg_controllen);
1858     if (msg_controllen < sizeof (struct target_cmsghdr))
1859         goto the_end;
1860     target_cmsg_addr = tswapal(target_msgh->msg_control);
1861     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1862     target_cmsg_start = target_cmsg;
1863     if (!target_cmsg)
1864         return -TARGET_EFAULT;
1865 
1866     while (cmsg && target_cmsg) {
1867         void *data = CMSG_DATA(cmsg);
1868         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1869 
1870         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1871         int tgt_len, tgt_space;
1872 
1873         /* We never copy a half-header but may copy half-data;
1874          * this is Linux's behaviour in put_cmsg(). Note that
1875          * truncation here is a guest problem (which we report
1876          * to the guest via the CTRUNC bit), unlike truncation
1877          * in target_to_host_cmsg, which is a QEMU bug.
1878          */
1879         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1880             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1881             break;
1882         }
1883 
1884         if (cmsg->cmsg_level == SOL_SOCKET) {
1885             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1886         } else {
1887             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1888         }
1889         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1890 
1891         /* Payload types which need a different size of payload on
1892          * the target must adjust tgt_len here.
1893          */
1894         tgt_len = len;
1895         switch (cmsg->cmsg_level) {
1896         case SOL_SOCKET:
1897             switch (cmsg->cmsg_type) {
1898             case SO_TIMESTAMP:
1899                 tgt_len = sizeof(struct target_timeval);
1900                 break;
1901             default:
1902                 break;
1903             }
1904             break;
1905         default:
1906             break;
1907         }
1908 
1909         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1910             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1911             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1912         }
1913 
1914         /* We must now copy-and-convert len bytes of payload
1915          * into tgt_len bytes of destination space. Bear in mind
1916          * that in both source and destination we may be dealing
1917          * with a truncated value!
1918          */
1919         switch (cmsg->cmsg_level) {
1920         case SOL_SOCKET:
1921             switch (cmsg->cmsg_type) {
1922             case SCM_RIGHTS:
1923             {
1924                 int *fd = (int *)data;
1925                 int *target_fd = (int *)target_data;
1926                 int i, numfds = tgt_len / sizeof(int);
1927 
1928                 for (i = 0; i < numfds; i++) {
1929                     __put_user(fd[i], target_fd + i);
1930                 }
1931                 break;
1932             }
1933             case SO_TIMESTAMP:
1934             {
1935                 struct timeval *tv = (struct timeval *)data;
1936                 struct target_timeval *target_tv =
1937                     (struct target_timeval *)target_data;
1938 
1939                 if (len != sizeof(struct timeval) ||
1940                     tgt_len != sizeof(struct target_timeval)) {
1941                     goto unimplemented;
1942                 }
1943 
1944                 /* copy struct timeval to target */
1945                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1946                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1947                 break;
1948             }
1949             case SCM_CREDENTIALS:
1950             {
1951                 struct ucred *cred = (struct ucred *)data;
1952                 struct target_ucred *target_cred =
1953                     (struct target_ucred *)target_data;
1954 
1955                 __put_user(cred->pid, &target_cred->pid);
1956                 __put_user(cred->uid, &target_cred->uid);
1957                 __put_user(cred->gid, &target_cred->gid);
1958                 break;
1959             }
1960             default:
1961                 goto unimplemented;
1962             }
1963             break;
1964 
1965         case SOL_IP:
1966             switch (cmsg->cmsg_type) {
1967             case IP_TTL:
1968             {
1969                 uint32_t *v = (uint32_t *)data;
1970                 uint32_t *t_int = (uint32_t *)target_data;
1971 
1972                 if (len != sizeof(uint32_t) ||
1973                     tgt_len != sizeof(uint32_t)) {
1974                     goto unimplemented;
1975                 }
1976                 __put_user(*v, t_int);
1977                 break;
1978             }
1979             case IP_RECVERR:
1980             {
1981                 struct errhdr_t {
1982                    struct sock_extended_err ee;
1983                    struct sockaddr_in offender;
1984                 };
1985                 struct errhdr_t *errh = (struct errhdr_t *)data;
1986                 struct errhdr_t *target_errh =
1987                     (struct errhdr_t *)target_data;
1988 
1989                 if (len != sizeof(struct errhdr_t) ||
1990                     tgt_len != sizeof(struct errhdr_t)) {
1991                     goto unimplemented;
1992                 }
1993                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1994                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1995                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1996                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1997                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1998                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1999                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2000                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2001                     (void *) &errh->offender, sizeof(errh->offender));
2002                 break;
2003             }
2004             default:
2005                 goto unimplemented;
2006             }
2007             break;
2008 
2009         case SOL_IPV6:
2010             switch (cmsg->cmsg_type) {
2011             case IPV6_HOPLIMIT:
2012             {
2013                 uint32_t *v = (uint32_t *)data;
2014                 uint32_t *t_int = (uint32_t *)target_data;
2015 
2016                 if (len != sizeof(uint32_t) ||
2017                     tgt_len != sizeof(uint32_t)) {
2018                     goto unimplemented;
2019                 }
2020                 __put_user(*v, t_int);
2021                 break;
2022             }
2023             case IPV6_RECVERR:
2024             {
2025                 struct errhdr6_t {
2026                    struct sock_extended_err ee;
2027                    struct sockaddr_in6 offender;
2028                 };
2029                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2030                 struct errhdr6_t *target_errh =
2031                     (struct errhdr6_t *)target_data;
2032 
2033                 if (len != sizeof(struct errhdr6_t) ||
2034                     tgt_len != sizeof(struct errhdr6_t)) {
2035                     goto unimplemented;
2036                 }
2037                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2038                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2039                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2040                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2041                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2042                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2043                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2044                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2045                     (void *) &errh->offender, sizeof(errh->offender));
2046                 break;
2047             }
2048             default:
2049                 goto unimplemented;
2050             }
2051             break;
2052 
2053         default:
2054         unimplemented:
2055             gemu_log("Unsupported ancillary data: %d/%d\n",
2056                                         cmsg->cmsg_level, cmsg->cmsg_type);
2057             memcpy(target_data, data, MIN(len, tgt_len));
2058             if (tgt_len > len) {
2059                 memset(target_data + len, 0, tgt_len - len);
2060             }
2061         }
2062 
2063         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2064         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2065         if (msg_controllen < tgt_space) {
2066             tgt_space = msg_controllen;
2067         }
2068         msg_controllen -= tgt_space;
2069         space += tgt_space;
2070         cmsg = CMSG_NXTHDR(msgh, cmsg);
2071         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2072                                          target_cmsg_start);
2073     }
2074     unlock_user(target_cmsg, target_cmsg_addr, space);
2075  the_end:
2076     target_msgh->msg_controllen = tswapal(space);
2077     return 0;
2078 }
2079 
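/* Byte-swap the fixed netlink message header in place; used by both
 * directions of the netlink conversion loops below.
 */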
2080 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2081 {
2082     nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2083     nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2084     nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2085     nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2086     nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
2087 }
2088 
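/* Walk a buffer of netlink messages coming from the host and convert
 * each one for the guest: NLMSG_DONE and NLMSG_ERROR are handled here,
 * everything else is passed to the supplied per-type callback before the
 * header itself is swapped.
 */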
2089 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2090                                               size_t len,
2091                                               abi_long (*host_to_target_nlmsg)
2092                                                        (struct nlmsghdr *))
2093 {
2094     uint32_t nlmsg_len;
2095     abi_long ret;
2096 
2097     while (len > sizeof(struct nlmsghdr)) {
2098 
2099         nlmsg_len = nlh->nlmsg_len;
2100         if (nlmsg_len < sizeof(struct nlmsghdr) ||
2101             nlmsg_len > len) {
2102             break;
2103         }
2104 
2105         switch (nlh->nlmsg_type) {
2106         case NLMSG_DONE:
2107             tswap_nlmsghdr(nlh);
2108             return 0;
2109         case NLMSG_NOOP:
2110             break;
2111         case NLMSG_ERROR:
2112         {
2113             struct nlmsgerr *e = NLMSG_DATA(nlh);
2114             e->error = tswap32(e->error);
2115             tswap_nlmsghdr(&e->msg);
2116             tswap_nlmsghdr(nlh);
2117             return 0;
2118         }
2119         default:
2120             ret = host_to_target_nlmsg(nlh);
2121             if (ret < 0) {
2122                 tswap_nlmsghdr(nlh);
2123                 return ret;
2124             }
2125             break;
2126         }
2127         tswap_nlmsghdr(nlh);
2128         len -= NLMSG_ALIGN(nlmsg_len);
2129         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlmsg_len));
2130     }
2131     return 0;
2132 }
2133 
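/* The guest-to-host counterpart of the loop above: the header is swapped
 * to host byte order first, so the per-type callback always sees a
 * host-order nlmsghdr.
 */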
2134 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2135                                               size_t len,
2136                                               abi_long (*target_to_host_nlmsg)
2137                                                        (struct nlmsghdr *))
2138 {
2139     int ret;
2140 
2141     while (len > sizeof(struct nlmsghdr)) {
2142         if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2143             tswap32(nlh->nlmsg_len) > len) {
2144             break;
2145         }
2146         tswap_nlmsghdr(nlh);
2147         switch (nlh->nlmsg_type) {
2148         case NLMSG_DONE:
2149             return 0;
2150         case NLMSG_NOOP:
2151             break;
2152         case NLMSG_ERROR:
2153         {
2154             struct nlmsgerr *e = NLMSG_DATA(nlh);
2155             e->error = tswap32(e->error);
2156             tswap_nlmsghdr(&e->msg);
2157             return 0;
2158         }
2159         default:
2160             ret = target_to_host_nlmsg(nlh);
2161             if (ret < 0) {
2162                 return ret;
2163             }
2164         }
2165         len -= NLMSG_ALIGN(nlh->nlmsg_len);
2166         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2167     }
2168     return 0;
2169 }
2170 
2171 #ifdef CONFIG_RTNETLINK
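/* Generic iterators over netlink attributes (struct nlattr) and routing
 * attributes (struct rtattr).  The per-attribute callback converts the
 * payload while it is still in host byte order; the attribute header is
 * byte-swapped afterwards.
 */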
2172 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2173                                                size_t len, void *context,
2174                                                abi_long (*host_to_target_nlattr)
2175                                                         (struct nlattr *,
2176                                                          void *context))
2177 {
2178     unsigned short nla_len;
2179     abi_long ret;
2180 
2181     while (len > sizeof(struct nlattr)) {
2182         nla_len = nlattr->nla_len;
2183         if (nla_len < sizeof(struct nlattr) ||
2184             nla_len > len) {
2185             break;
2186         }
2187         ret = host_to_target_nlattr(nlattr, context);
2188         nlattr->nla_len = tswap16(nlattr->nla_len);
2189         nlattr->nla_type = tswap16(nlattr->nla_type);
2190         if (ret < 0) {
2191             return ret;
2192         }
2193         len -= NLA_ALIGN(nla_len);
2194         nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2195     }
2196     return 0;
2197 }
2198 
2199 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2200                                                size_t len,
2201                                                abi_long (*host_to_target_rtattr)
2202                                                         (struct rtattr *))
2203 {
2204     unsigned short rta_len;
2205     abi_long ret;
2206 
2207     while (len > sizeof(struct rtattr)) {
2208         rta_len = rtattr->rta_len;
2209         if (rta_len < sizeof(struct rtattr) ||
2210             rta_len > len) {
2211             break;
2212         }
2213         ret = host_to_target_rtattr(rtattr);
2214         rtattr->rta_len = tswap16(rtattr->rta_len);
2215         rtattr->rta_type = tswap16(rtattr->rta_type);
2216         if (ret < 0) {
2217             return ret;
2218         }
2219         len -= RTA_ALIGN(rta_len);
2220         rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2221     }
2222     return 0;
2223 }
2224 
2225 #define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
2226 
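/* Per-attribute converters.  Each switch groups the attribute types by
 * payload size (no data, uint8_t, uint16_t, ...) and byte-swaps only the
 * widths that need it; unknown types are logged and left untouched.
 */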
2227 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2228                                                   void *context)
2229 {
2230     uint16_t *u16;
2231     uint32_t *u32;
2232     uint64_t *u64;
2233 
2234     switch (nlattr->nla_type) {
2235     /* no data */
2236     case QEMU_IFLA_BR_FDB_FLUSH:
2237         break;
2238     /* binary */
2239     case QEMU_IFLA_BR_GROUP_ADDR:
2240         break;
2241     /* uint8_t */
2242     case QEMU_IFLA_BR_VLAN_FILTERING:
2243     case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2244     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2245     case QEMU_IFLA_BR_MCAST_ROUTER:
2246     case QEMU_IFLA_BR_MCAST_SNOOPING:
2247     case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2248     case QEMU_IFLA_BR_MCAST_QUERIER:
2249     case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2250     case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2251     case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2252     case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
2253     case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
2254     case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
2255     case QEMU_IFLA_BR_MCAST_MLD_VERSION:
2256         break;
2257     /* uint16_t */
2258     case QEMU_IFLA_BR_PRIORITY:
2259     case QEMU_IFLA_BR_VLAN_PROTOCOL:
2260     case QEMU_IFLA_BR_GROUP_FWD_MASK:
2261     case QEMU_IFLA_BR_ROOT_PORT:
2262     case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2263         u16 = NLA_DATA(nlattr);
2264         *u16 = tswap16(*u16);
2265         break;
2266     /* uint32_t */
2267     case QEMU_IFLA_BR_FORWARD_DELAY:
2268     case QEMU_IFLA_BR_HELLO_TIME:
2269     case QEMU_IFLA_BR_MAX_AGE:
2270     case QEMU_IFLA_BR_AGEING_TIME:
2271     case QEMU_IFLA_BR_STP_STATE:
2272     case QEMU_IFLA_BR_ROOT_PATH_COST:
2273     case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2274     case QEMU_IFLA_BR_MCAST_HASH_MAX:
2275     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2276     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2277         u32 = NLA_DATA(nlattr);
2278         *u32 = tswap32(*u32);
2279         break;
2280     /* uint64_t */
2281     case QEMU_IFLA_BR_HELLO_TIMER:
2282     case QEMU_IFLA_BR_TCN_TIMER:
2283     case QEMU_IFLA_BR_GC_TIMER:
2284     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2285     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2286     case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2287     case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2288     case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2289     case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2290     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2291         u64 = NLA_DATA(nlattr);
2292         *u64 = tswap64(*u64);
2293         break;
2294     /* ifla_bridge_id: uint8_t[] */
2295     case QEMU_IFLA_BR_ROOT_ID:
2296     case QEMU_IFLA_BR_BRIDGE_ID:
2297         break;
2298     default:
2299         gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2300         break;
2301     }
2302     return 0;
2303 }
2304 
2305 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2306                                                         void *context)
2307 {
2308     uint16_t *u16;
2309     uint32_t *u32;
2310     uint64_t *u64;
2311 
2312     switch (nlattr->nla_type) {
2313     /* uint8_t */
2314     case QEMU_IFLA_BRPORT_STATE:
2315     case QEMU_IFLA_BRPORT_MODE:
2316     case QEMU_IFLA_BRPORT_GUARD:
2317     case QEMU_IFLA_BRPORT_PROTECT:
2318     case QEMU_IFLA_BRPORT_FAST_LEAVE:
2319     case QEMU_IFLA_BRPORT_LEARNING:
2320     case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2321     case QEMU_IFLA_BRPORT_PROXYARP:
2322     case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2323     case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2324     case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2325     case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2326     case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2327     case QEMU_IFLA_BRPORT_MCAST_FLOOD:
2328     case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
2329     case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
2330     case QEMU_IFLA_BRPORT_BCAST_FLOOD:
2331     case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
2332         break;
2333     /* uint16_t */
2334     case QEMU_IFLA_BRPORT_PRIORITY:
2335     case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2336     case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2337     case QEMU_IFLA_BRPORT_ID:
2338     case QEMU_IFLA_BRPORT_NO:
2339     case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
2340         u16 = NLA_DATA(nlattr);
2341         *u16 = tswap16(*u16);
2342         break;
2343     /* uint32_t */
2344     case QEMU_IFLA_BRPORT_COST:
2345         u32 = NLA_DATA(nlattr);
2346         *u32 = tswap32(*u32);
2347         break;
2348     /* uint64_t */
2349     case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2350     case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2351     case QEMU_IFLA_BRPORT_HOLD_TIMER:
2352         u64 = NLA_DATA(nlattr);
2353         *u64 = tswap64(*u64);
2354         break;
2355     /* ifla_bridge_id: uint8_t[] */
2356     case QEMU_IFLA_BRPORT_ROOT_ID:
2357     case QEMU_IFLA_BRPORT_BRIDGE_ID:
2358         break;
2359     default:
2360         gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2361         break;
2362     }
2363     return 0;
2364 }
2365 
2366 static abi_long host_to_target_data_tun_nlattr(struct nlattr *nlattr,
2367                                                   void *context)
2368 {
2369     uint32_t *u32;
2370 
2371     switch (nlattr->nla_type) {
2372     /* uint8_t */
2373     case QEMU_IFLA_TUN_TYPE:
2374     case QEMU_IFLA_TUN_PI:
2375     case QEMU_IFLA_TUN_VNET_HDR:
2376     case QEMU_IFLA_TUN_PERSIST:
2377     case QEMU_IFLA_TUN_MULTI_QUEUE:
2378         break;
2379     /* uint32_t */
2380     case QEMU_IFLA_TUN_NUM_QUEUES:
2381     case QEMU_IFLA_TUN_NUM_DISABLED_QUEUES:
2382     case QEMU_IFLA_TUN_OWNER:
2383     case QEMU_IFLA_TUN_GROUP:
2384         u32 = NLA_DATA(nlattr);
2385         *u32 = tswap32(*u32);
2386         break;
2387     default:
2388         gemu_log("Unknown QEMU_IFLA_TUN type %d\n", nlattr->nla_type);
2389         break;
2390     }
2391     return 0;
2392 }
2393 
2394 struct linkinfo_context {
2395     int len;
2396     char *name;
2397     int slave_len;
2398     char *slave_name;
2399 };
2400 
2401 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2402                                                     void *context)
2403 {
2404     struct linkinfo_context *li_context = context;
2405 
2406     switch (nlattr->nla_type) {
2407     /* string */
2408     case QEMU_IFLA_INFO_KIND:
2409         li_context->name = NLA_DATA(nlattr);
2410         li_context->len = nlattr->nla_len - NLA_HDRLEN;
2411         break;
2412     case QEMU_IFLA_INFO_SLAVE_KIND:
2413         li_context->slave_name = NLA_DATA(nlattr);
2414         li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2415         break;
2416     /* stats */
2417     case QEMU_IFLA_INFO_XSTATS:
2418         /* FIXME: only used by CAN */
2419         break;
2420     /* nested */
2421     case QEMU_IFLA_INFO_DATA:
2422         if (strncmp(li_context->name, "bridge",
2423                     li_context->len) == 0) {
2424             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2425                                                   nlattr->nla_len,
2426                                                   NULL,
2427                                              host_to_target_data_bridge_nlattr);
2428         } else if (strncmp(li_context->name, "tun",
2429                     li_context->len) == 0) {
2430             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2431                                                   nlattr->nla_len,
2432                                                   NULL,
2433                                                 host_to_target_data_tun_nlattr);
2434         } else {
2435             gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2436         }
2437         break;
2438     case QEMU_IFLA_INFO_SLAVE_DATA:
2439         if (strncmp(li_context->slave_name, "bridge",
2440                     li_context->slave_len) == 0) {
2441             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2442                                                   nlattr->nla_len,
2443                                                   NULL,
2444                                        host_to_target_slave_data_bridge_nlattr);
2445         } else {
2446             gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2447                      li_context->slave_name);
2448         }
2449         break;
2450     default:
2451         gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2452         break;
2453     }
2454 
2455     return 0;
2456 }
2457 
2458 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2459                                                 void *context)
2460 {
2461     uint32_t *u32;
2462     int i;
2463 
2464     switch (nlattr->nla_type) {
2465     case QEMU_IFLA_INET_CONF:
2466         u32 = NLA_DATA(nlattr);
2467         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2468              i++) {
2469             u32[i] = tswap32(u32[i]);
2470         }
2471         break;
2472     default:
2473         gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2474     }
2475     return 0;
2476 }
2477 
2478 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2479                                                 void *context)
2480 {
2481     uint32_t *u32;
2482     uint64_t *u64;
2483     struct ifla_cacheinfo *ci;
2484     int i;
2485 
2486     switch (nlattr->nla_type) {
2487     /* binaries */
2488     case QEMU_IFLA_INET6_TOKEN:
2489         break;
2490     /* uint8_t */
2491     case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2492         break;
2493     /* uint32_t */
2494     case QEMU_IFLA_INET6_FLAGS:
2495         u32 = NLA_DATA(nlattr);
2496         *u32 = tswap32(*u32);
2497         break;
2498     /* uint32_t[] */
2499     case QEMU_IFLA_INET6_CONF:
2500         u32 = NLA_DATA(nlattr);
2501         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2502              i++) {
2503             u32[i] = tswap32(u32[i]);
2504         }
2505         break;
2506     /* ifla_cacheinfo */
2507     case QEMU_IFLA_INET6_CACHEINFO:
2508         ci = NLA_DATA(nlattr);
2509         ci->max_reasm_len = tswap32(ci->max_reasm_len);
2510         ci->tstamp = tswap32(ci->tstamp);
2511         ci->reachable_time = tswap32(ci->reachable_time);
2512         ci->retrans_time = tswap32(ci->retrans_time);
2513         break;
2514     /* uint64_t[] */
2515     case QEMU_IFLA_INET6_STATS:
2516     case QEMU_IFLA_INET6_ICMP6STATS:
2517         u64 = NLA_DATA(nlattr);
2518         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2519              i++) {
2520             u64[i] = tswap64(u64[i]);
2521         }
2522         break;
2523     default:
2524         gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2525     }
2526     return 0;
2527 }
2528 
2529 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2530                                                     void *context)
2531 {
2532     switch (nlattr->nla_type) {
2533     case AF_INET:
2534         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2535                                               NULL,
2536                                              host_to_target_data_inet_nlattr);
2537     case AF_INET6:
2538         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2539                                               NULL,
2540                                              host_to_target_data_inet6_nlattr);
2541     default:
2542         gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2543         break;
2544     }
2545     return 0;
2546 }
2547 
2548 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2549                                                void *context)
2550 {
2551     uint32_t *u32;
2552 
2553     switch (nlattr->nla_type) {
2554     /* uint8_t */
2555     case QEMU_IFLA_XDP_ATTACHED:
2556         break;
2557     /* uint32_t */
2558     case QEMU_IFLA_XDP_PROG_ID:
2559         u32 = NLA_DATA(nlattr);
2560         *u32 = tswap32(*u32);
2561         break;
2562     default:
2563         gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2564         break;
2565     }
2566     return 0;
2567 }
2568 
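/* Top-level converter for RTM_*LINK attributes: scalar attributes are
 * swapped directly, the link statistics and ifmap structures field by
 * field, and nested attribute blocks (LINKINFO, AF_SPEC, XDP) are handed
 * back to host_to_target_for_each_nlattr().
 */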
2569 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2570 {
2571     uint32_t *u32;
2572     struct rtnl_link_stats *st;
2573     struct rtnl_link_stats64 *st64;
2574     struct rtnl_link_ifmap *map;
2575     struct linkinfo_context li_context;
2576 
2577     switch (rtattr->rta_type) {
2578     /* binary stream */
2579     case QEMU_IFLA_ADDRESS:
2580     case QEMU_IFLA_BROADCAST:
2581     /* string */
2582     case QEMU_IFLA_IFNAME:
2583     case QEMU_IFLA_QDISC:
2584         break;
2585     /* uint8_t */
2586     case QEMU_IFLA_OPERSTATE:
2587     case QEMU_IFLA_LINKMODE:
2588     case QEMU_IFLA_CARRIER:
2589     case QEMU_IFLA_PROTO_DOWN:
2590         break;
2591     /* uint32_t */
2592     case QEMU_IFLA_MTU:
2593     case QEMU_IFLA_LINK:
2594     case QEMU_IFLA_WEIGHT:
2595     case QEMU_IFLA_TXQLEN:
2596     case QEMU_IFLA_CARRIER_CHANGES:
2597     case QEMU_IFLA_NUM_RX_QUEUES:
2598     case QEMU_IFLA_NUM_TX_QUEUES:
2599     case QEMU_IFLA_PROMISCUITY:
2600     case QEMU_IFLA_EXT_MASK:
2601     case QEMU_IFLA_LINK_NETNSID:
2602     case QEMU_IFLA_GROUP:
2603     case QEMU_IFLA_MASTER:
2604     case QEMU_IFLA_NUM_VF:
2605     case QEMU_IFLA_GSO_MAX_SEGS:
2606     case QEMU_IFLA_GSO_MAX_SIZE:
2607     case QEMU_IFLA_CARRIER_UP_COUNT:
2608     case QEMU_IFLA_CARRIER_DOWN_COUNT:
2609         u32 = RTA_DATA(rtattr);
2610         *u32 = tswap32(*u32);
2611         break;
2612     /* struct rtnl_link_stats */
2613     case QEMU_IFLA_STATS:
2614         st = RTA_DATA(rtattr);
2615         st->rx_packets = tswap32(st->rx_packets);
2616         st->tx_packets = tswap32(st->tx_packets);
2617         st->rx_bytes = tswap32(st->rx_bytes);
2618         st->tx_bytes = tswap32(st->tx_bytes);
2619         st->rx_errors = tswap32(st->rx_errors);
2620         st->tx_errors = tswap32(st->tx_errors);
2621         st->rx_dropped = tswap32(st->rx_dropped);
2622         st->tx_dropped = tswap32(st->tx_dropped);
2623         st->multicast = tswap32(st->multicast);
2624         st->collisions = tswap32(st->collisions);
2625 
2626         /* detailed rx_errors: */
2627         st->rx_length_errors = tswap32(st->rx_length_errors);
2628         st->rx_over_errors = tswap32(st->rx_over_errors);
2629         st->rx_crc_errors = tswap32(st->rx_crc_errors);
2630         st->rx_frame_errors = tswap32(st->rx_frame_errors);
2631         st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2632         st->rx_missed_errors = tswap32(st->rx_missed_errors);
2633 
2634         /* detailed tx_errors */
2635         st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2636         st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2637         st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2638         st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2639         st->tx_window_errors = tswap32(st->tx_window_errors);
2640 
2641         /* for cslip etc */
2642         st->rx_compressed = tswap32(st->rx_compressed);
2643         st->tx_compressed = tswap32(st->tx_compressed);
2644         break;
2645     /* struct rtnl_link_stats64 */
2646     case QEMU_IFLA_STATS64:
2647         st64 = RTA_DATA(rtattr);
2648         st64->rx_packets = tswap64(st64->rx_packets);
2649         st64->tx_packets = tswap64(st64->tx_packets);
2650         st64->rx_bytes = tswap64(st64->rx_bytes);
2651         st64->tx_bytes = tswap64(st64->tx_bytes);
2652         st64->rx_errors = tswap64(st64->rx_errors);
2653         st64->tx_errors = tswap64(st64->tx_errors);
2654         st64->rx_dropped = tswap64(st64->rx_dropped);
2655         st64->tx_dropped = tswap64(st64->tx_dropped);
2656         st64->multicast = tswap64(st64->multicast);
2657         st64->collisions = tswap64(st64->collisions);
2658 
2659         /* detailed rx_errors: */
2660         st64->rx_length_errors = tswap64(st64->rx_length_errors);
2661         st64->rx_over_errors = tswap64(st64->rx_over_errors);
2662         st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2663         st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2664         st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2665         st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2666 
2667         /* detailed tx_errors */
2668         st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2669         st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2670         st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2671         st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2672         st64->tx_window_errors = tswap64(st64->tx_window_errors);
2673 
2674         /* for cslip etc */
2675         st64->rx_compressed = tswap64(st64->rx_compressed);
2676         st64->tx_compressed = tswap64(st64->tx_compressed);
2677         break;
2678     /* struct rtnl_link_ifmap */
2679     case QEMU_IFLA_MAP:
2680         map = RTA_DATA(rtattr);
2681         map->mem_start = tswap64(map->mem_start);
2682         map->mem_end = tswap64(map->mem_end);
2683         map->base_addr = tswap64(map->base_addr);
2684         map->irq = tswap16(map->irq);
2685         break;
2686     /* nested */
2687     case QEMU_IFLA_LINKINFO:
2688         memset(&li_context, 0, sizeof(li_context));
2689         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2690                                               &li_context,
2691                                            host_to_target_data_linkinfo_nlattr);
2692     case QEMU_IFLA_AF_SPEC:
2693         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2694                                               NULL,
2695                                              host_to_target_data_spec_nlattr);
2696     case QEMU_IFLA_XDP:
2697         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2698                                               NULL,
2699                                                 host_to_target_data_xdp_nlattr);
2700     default:
2701         gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2702         break;
2703     }
2704     return 0;
2705 }
2706 
2707 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2708 {
2709     uint32_t *u32;
2710     struct ifa_cacheinfo *ci;
2711 
2712     switch (rtattr->rta_type) {
2713     /* binary: depends on family type */
2714     case IFA_ADDRESS:
2715     case IFA_LOCAL:
2716         break;
2717     /* string */
2718     case IFA_LABEL:
2719         break;
2720     /* u32 */
2721     case IFA_FLAGS:
2722     case IFA_BROADCAST:
2723         u32 = RTA_DATA(rtattr);
2724         *u32 = tswap32(*u32);
2725         break;
2726     /* struct ifa_cacheinfo */
2727     case IFA_CACHEINFO:
2728         ci = RTA_DATA(rtattr);
2729         ci->ifa_prefered = tswap32(ci->ifa_prefered);
2730         ci->ifa_valid = tswap32(ci->ifa_valid);
2731         ci->cstamp = tswap32(ci->cstamp);
2732         ci->tstamp = tswap32(ci->tstamp);
2733         break;
2734     default:
2735         gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2736         break;
2737     }
2738     return 0;
2739 }
2740 
2741 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2742 {
2743     uint32_t *u32;
2744     struct rta_cacheinfo *ci;
2745 
2746     switch (rtattr->rta_type) {
2747     /* binary: depends on family type */
2748     case QEMU_RTA_GATEWAY:
2749     case QEMU_RTA_DST:
2750     case QEMU_RTA_PREFSRC:
2751         break;
2752     /* u8 */
2753     case QEMU_RTA_PREF:
2754         break;
2755     /* u32 */
2756     case QEMU_RTA_PRIORITY:
2757     case QEMU_RTA_TABLE:
2758     case QEMU_RTA_OIF:
2759         u32 = RTA_DATA(rtattr);
2760         *u32 = tswap32(*u32);
2761         break;
2762     /* struct rta_cacheinfo */
2763     case QEMU_RTA_CACHEINFO:
2764         ci = RTA_DATA(rtattr);
2765         ci->rta_clntref = tswap32(ci->rta_clntref);
2766         ci->rta_lastuse = tswap32(ci->rta_lastuse);
2767         ci->rta_expires = tswap32(ci->rta_expires);
2768         ci->rta_error = tswap32(ci->rta_error);
2769         ci->rta_used = tswap32(ci->rta_used);
2770 #if defined(RTNETLINK_HAVE_PEERINFO)
2771         ci->rta_id = tswap32(ci->rta_id);
2772         ci->rta_ts = tswap32(ci->rta_ts);
2773         ci->rta_tsage = tswap32(ci->rta_tsage);
2774 #endif
2775         break;
2776     default:
2777         gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2778         break;
2779     }
2780     return 0;
2781 }
2782 
2783 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2784                                          uint32_t rtattr_len)
2785 {
2786     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2787                                           host_to_target_data_link_rtattr);
2788 }
2789 
2790 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2791                                          uint32_t rtattr_len)
2792 {
2793     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2794                                           host_to_target_data_addr_rtattr);
2795 }
2796 
2797 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2798                                          uint32_t rtattr_len)
2799 {
2800     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2801                                           host_to_target_data_route_rtattr);
2802 }
2803 
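/* Dispatch on the rtnetlink message type and convert the fixed payload
 * (ifinfomsg, ifaddrmsg or rtmsg) plus its trailing attributes for the
 * guest.
 */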
2804 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2805 {
2806     uint32_t nlmsg_len;
2807     struct ifinfomsg *ifi;
2808     struct ifaddrmsg *ifa;
2809     struct rtmsg *rtm;
2810 
2811     nlmsg_len = nlh->nlmsg_len;
2812     switch (nlh->nlmsg_type) {
2813     case RTM_NEWLINK:
2814     case RTM_DELLINK:
2815     case RTM_GETLINK:
2816         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2817             ifi = NLMSG_DATA(nlh);
2818             ifi->ifi_type = tswap16(ifi->ifi_type);
2819             ifi->ifi_index = tswap32(ifi->ifi_index);
2820             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2821             ifi->ifi_change = tswap32(ifi->ifi_change);
2822             host_to_target_link_rtattr(IFLA_RTA(ifi),
2823                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2824         }
2825         break;
2826     case RTM_NEWADDR:
2827     case RTM_DELADDR:
2828     case RTM_GETADDR:
2829         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2830             ifa = NLMSG_DATA(nlh);
2831             ifa->ifa_index = tswap32(ifa->ifa_index);
2832             host_to_target_addr_rtattr(IFA_RTA(ifa),
2833                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2834         }
2835         break;
2836     case RTM_NEWROUTE:
2837     case RTM_DELROUTE:
2838     case RTM_GETROUTE:
2839         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2840             rtm = NLMSG_DATA(nlh);
2841             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2842             host_to_target_route_rtattr(RTM_RTA(rtm),
2843                                         nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2844         }
2845         break;
2846     default:
2847         return -TARGET_EINVAL;
2848     }
2849     return 0;
2850 }
2851 
2852 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2853                                                   size_t len)
2854 {
2855     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2856 }
2857 
2858 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2859                                                size_t len,
2860                                                abi_long (*target_to_host_rtattr)
2861                                                         (struct rtattr *))
2862 {
2863     abi_long ret;
2864 
2865     while (len >= sizeof(struct rtattr)) {
2866         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2867             tswap16(rtattr->rta_len) > len) {
2868             break;
2869         }
2870         rtattr->rta_len = tswap16(rtattr->rta_len);
2871         rtattr->rta_type = tswap16(rtattr->rta_type);
2872         ret = target_to_host_rtattr(rtattr);
2873         if (ret < 0) {
2874             return ret;
2875         }
2876         len -= RTA_ALIGN(rtattr->rta_len);
2877         rtattr = (struct rtattr *)(((char *)rtattr) +
2878                  RTA_ALIGN(rtattr->rta_len));
2879     }
2880     return 0;
2881 }
2882 
2883 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2884 {
2885     switch (rtattr->rta_type) {
2886     default:
2887         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2888         break;
2889     }
2890     return 0;
2891 }
2892 
2893 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2894 {
2895     switch (rtattr->rta_type) {
2896     /* binary: depends on family type */
2897     case IFA_LOCAL:
2898     case IFA_ADDRESS:
2899         break;
2900     default:
2901         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2902         break;
2903     }
2904     return 0;
2905 }
2906 
2907 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2908 {
2909     uint32_t *u32;
2910     switch (rtattr->rta_type) {
2911     /* binary: depends on family type */
2912     case QEMU_RTA_DST:
2913     case QEMU_RTA_SRC:
2914     case QEMU_RTA_GATEWAY:
2915         break;
2916     /* u32 */
2917     case QEMU_RTA_PRIORITY:
2918     case QEMU_RTA_OIF:
2919         u32 = RTA_DATA(rtattr);
2920         *u32 = tswap32(*u32);
2921         break;
2922     default:
2923         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2924         break;
2925     }
2926     return 0;
2927 }
2928 
2929 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2930                                        uint32_t rtattr_len)
2931 {
2932     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2933                                    target_to_host_data_link_rtattr);
2934 }
2935 
2936 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2937                                      uint32_t rtattr_len)
2938 {
2939     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2940                                    target_to_host_data_addr_rtattr);
2941 }
2942 
2943 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2944                                      uint32_t rtattr_len)
2945 {
2946     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2947                                    target_to_host_data_route_rtattr);
2948 }
2949 
2950 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2951 {
2952     struct ifinfomsg *ifi;
2953     struct ifaddrmsg *ifa;
2954     struct rtmsg *rtm;
2955 
2956     switch (nlh->nlmsg_type) {
2957     case RTM_GETLINK:
2958         break;
2959     case RTM_NEWLINK:
2960     case RTM_DELLINK:
2961         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2962             ifi = NLMSG_DATA(nlh);
2963             ifi->ifi_type = tswap16(ifi->ifi_type);
2964             ifi->ifi_index = tswap32(ifi->ifi_index);
2965             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2966             ifi->ifi_change = tswap32(ifi->ifi_change);
2967             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2968                                        NLMSG_LENGTH(sizeof(*ifi)));
2969         }
2970         break;
2971     case RTM_GETADDR:
2972     case RTM_NEWADDR:
2973     case RTM_DELADDR:
2974         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2975             ifa = NLMSG_DATA(nlh);
2976             ifa->ifa_index = tswap32(ifa->ifa_index);
2977             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2978                                        NLMSG_LENGTH(sizeof(*ifa)));
2979         }
2980         break;
2981     case RTM_GETROUTE:
2982         break;
2983     case RTM_NEWROUTE:
2984     case RTM_DELROUTE:
2985         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2986             rtm = NLMSG_DATA(nlh);
2987             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2988             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2989                                         NLMSG_LENGTH(sizeof(*rtm)));
2990         }
2991         break;
2992     default:
2993         return -TARGET_EOPNOTSUPP;
2994     }
2995     return 0;
2996 }
2997 
2998 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2999 {
3000     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
3001 }
3002 #endif /* CONFIG_RTNETLINK */
3003 
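/* Audit netlink conversion: no host-originated audit message types are
 * converted yet (they are logged and rejected), and in the guest-to-host
 * direction only the user message types are accepted.
 */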
3004 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
3005 {
3006     switch (nlh->nlmsg_type) {
3007     default:
3008         gemu_log("Unknown host audit message type %d\n",
3009                  nlh->nlmsg_type);
3010         return -TARGET_EINVAL;
3011     }
3012     return 0;
3013 }
3014 
3015 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
3016                                                   size_t len)
3017 {
3018     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
3019 }
3020 
3021 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
3022 {
3023     switch (nlh->nlmsg_type) {
3024     case AUDIT_USER:
3025     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
3026     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
3027         break;
3028     default:
3029         gemu_log("Unknown target audit message type %d\n",
3030                  nlh->nlmsg_type);
3031         return -TARGET_EINVAL;
3032     }
3033 
3034     return 0;
3035 }
3036 
3037 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
3038 {
3039     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
3040 }
3041 
3042 /* do_setsockopt() must return target values and target errnos. */
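/* Plain integer options are read as a 32-bit value (or, for the SOL_IP
 * byte-sized options, as an 8-bit value) from guest memory; structured
 * options such as the multicast membership requests are converted with
 * dedicated helpers before being handed to the host setsockopt().
 */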
3043 static abi_long do_setsockopt(int sockfd, int level, int optname,
3044                               abi_ulong optval_addr, socklen_t optlen)
3045 {
3046     abi_long ret;
3047     int val;
3048     struct ip_mreqn *ip_mreq;
3049     struct ip_mreq_source *ip_mreq_source;
3050 
3051     switch(level) {
3052     case SOL_TCP:
3053         /* TCP options all take an 'int' value.  */
3054         if (optlen < sizeof(uint32_t))
3055             return -TARGET_EINVAL;
3056 
3057         if (get_user_u32(val, optval_addr))
3058             return -TARGET_EFAULT;
3059         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
3060         break;
3061     case SOL_IP:
3062         switch(optname) {
3063         case IP_TOS:
3064         case IP_TTL:
3065         case IP_HDRINCL:
3066         case IP_ROUTER_ALERT:
3067         case IP_RECVOPTS:
3068         case IP_RETOPTS:
3069         case IP_PKTINFO:
3070         case IP_MTU_DISCOVER:
3071         case IP_RECVERR:
3072         case IP_RECVTTL:
3073         case IP_RECVTOS:
3074 #ifdef IP_FREEBIND
3075         case IP_FREEBIND:
3076 #endif
3077         case IP_MULTICAST_TTL:
3078         case IP_MULTICAST_LOOP:
3079             val = 0;
3080             if (optlen >= sizeof(uint32_t)) {
3081                 if (get_user_u32(val, optval_addr))
3082                     return -TARGET_EFAULT;
3083             } else if (optlen >= 1) {
3084                 if (get_user_u8(val, optval_addr))
3085                     return -TARGET_EFAULT;
3086             }
3087             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
3088             break;
3089         case IP_ADD_MEMBERSHIP:
3090         case IP_DROP_MEMBERSHIP:
3091             if (optlen < sizeof (struct target_ip_mreq) ||
3092                 optlen > sizeof (struct target_ip_mreqn))
3093                 return -TARGET_EINVAL;
3094 
3095             ip_mreq = (struct ip_mreqn *) alloca(optlen);
3096             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
3097             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
3098             break;
3099 
3100         case IP_BLOCK_SOURCE:
3101         case IP_UNBLOCK_SOURCE:
3102         case IP_ADD_SOURCE_MEMBERSHIP:
3103         case IP_DROP_SOURCE_MEMBERSHIP:
3104             if (optlen != sizeof (struct target_ip_mreq_source))
3105                 return -TARGET_EINVAL;
3106 
3107             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3108             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3109             unlock_user (ip_mreq_source, optval_addr, 0);
3110             break;
3111 
3112         default:
3113             goto unimplemented;
3114         }
3115         break;
3116     case SOL_IPV6:
3117         switch (optname) {
3118         case IPV6_MTU_DISCOVER:
3119         case IPV6_MTU:
3120         case IPV6_V6ONLY:
3121         case IPV6_RECVPKTINFO:
3122         case IPV6_UNICAST_HOPS:
3123         case IPV6_MULTICAST_HOPS:
3124         case IPV6_MULTICAST_LOOP:
3125         case IPV6_RECVERR:
3126         case IPV6_RECVHOPLIMIT:
3127         case IPV6_2292HOPLIMIT:
3128         case IPV6_CHECKSUM:
3129             val = 0;
3130             if (optlen < sizeof(uint32_t)) {
3131                 return -TARGET_EINVAL;
3132             }
3133             if (get_user_u32(val, optval_addr)) {
3134                 return -TARGET_EFAULT;
3135             }
3136             ret = get_errno(setsockopt(sockfd, level, optname,
3137                                        &val, sizeof(val)));
3138             break;
3139         case IPV6_PKTINFO:
3140         {
3141             struct in6_pktinfo pki;
3142 
3143             if (optlen < sizeof(pki)) {
3144                 return -TARGET_EINVAL;
3145             }
3146 
3147             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3148                 return -TARGET_EFAULT;
3149             }
3150 
3151             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3152 
3153             ret = get_errno(setsockopt(sockfd, level, optname,
3154                                        &pki, sizeof(pki)));
3155             break;
3156         }
3157         default:
3158             goto unimplemented;
3159         }
3160         break;
3161     case SOL_ICMPV6:
3162         switch (optname) {
3163         case ICMPV6_FILTER:
3164         {
3165             struct icmp6_filter icmp6f;
3166 
3167             if (optlen > sizeof(icmp6f)) {
3168                 optlen = sizeof(icmp6f);
3169             }
3170 
3171             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3172                 return -TARGET_EFAULT;
3173             }
3174 
3175             for (val = 0; val < 8; val++) {
3176                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3177             }
3178 
3179             ret = get_errno(setsockopt(sockfd, level, optname,
3180                                        &icmp6f, optlen));
3181             break;
3182         }
3183         default:
3184             goto unimplemented;
3185         }
3186         break;
3187     case SOL_RAW:
3188         switch (optname) {
3189         case ICMP_FILTER:
3190         case IPV6_CHECKSUM:
3191             /* these take a u32 value */
3192             if (optlen < sizeof(uint32_t)) {
3193                 return -TARGET_EINVAL;
3194             }
3195 
3196             if (get_user_u32(val, optval_addr)) {
3197                 return -TARGET_EFAULT;
3198             }
3199             ret = get_errno(setsockopt(sockfd, level, optname,
3200                                        &val, sizeof(val)));
3201             break;
3202 
3203         default:
3204             goto unimplemented;
3205         }
3206         break;
3207     case TARGET_SOL_SOCKET:
3208         switch (optname) {
3209         case TARGET_SO_RCVTIMEO:
3210         {
3211                 struct timeval tv;
3212 
3213                 optname = SO_RCVTIMEO;
3214 
3215 set_timeout:
3216                 if (optlen != sizeof(struct target_timeval)) {
3217                     return -TARGET_EINVAL;
3218                 }
3219 
3220                 if (copy_from_user_timeval(&tv, optval_addr)) {
3221                     return -TARGET_EFAULT;
3222                 }
3223 
3224                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3225                                 &tv, sizeof(tv)));
3226                 return ret;
3227         }
3228         case TARGET_SO_SNDTIMEO:
3229                 optname = SO_SNDTIMEO;
3230                 goto set_timeout;
3231         case TARGET_SO_ATTACH_FILTER:
3232         {
3233                 struct target_sock_fprog *tfprog;
3234                 struct target_sock_filter *tfilter;
3235                 struct sock_fprog fprog;
3236                 struct sock_filter *filter;
3237                 int i;
3238 
3239                 if (optlen != sizeof(*tfprog)) {
3240                     return -TARGET_EINVAL;
3241                 }
3242                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3243                     return -TARGET_EFAULT;
3244                 }
3245                 if (!lock_user_struct(VERIFY_READ, tfilter,
3246                                       tswapal(tfprog->filter), 0)) {
3247                     unlock_user_struct(tfprog, optval_addr, 1);
3248                     return -TARGET_EFAULT;
3249                 }
3250 
3251                 fprog.len = tswap16(tfprog->len);
3252                 filter = g_try_new(struct sock_filter, fprog.len);
3253                 if (filter == NULL) {
3254                     unlock_user_struct(tfilter, tfprog->filter, 1);
3255                     unlock_user_struct(tfprog, optval_addr, 1);
3256                     return -TARGET_ENOMEM;
3257                 }
3258                 for (i = 0; i < fprog.len; i++) {
3259                     filter[i].code = tswap16(tfilter[i].code);
3260                     filter[i].jt = tfilter[i].jt;
3261                     filter[i].jf = tfilter[i].jf;
3262                     filter[i].k = tswap32(tfilter[i].k);
3263                 }
3264                 fprog.filter = filter;
3265 
3266                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3267                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3268                 g_free(filter);
3269 
3270                 unlock_user_struct(tfilter, tfprog->filter, 1);
3271                 unlock_user_struct(tfprog, optval_addr, 1);
3272                 return ret;
3273         }
3274 	case TARGET_SO_BINDTODEVICE:
3275 	{
3276 		char *dev_ifname, *addr_ifname;
3277 
3278 		if (optlen > IFNAMSIZ - 1) {
3279 		    optlen = IFNAMSIZ - 1;
3280 		}
3281 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3282 		if (!dev_ifname) {
3283 		    return -TARGET_EFAULT;
3284 		}
3285 		optname = SO_BINDTODEVICE;
3286 		addr_ifname = alloca(IFNAMSIZ);
3287 		memcpy(addr_ifname, dev_ifname, optlen);
3288 		addr_ifname[optlen] = 0;
3289 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3290                                            addr_ifname, optlen));
3291 		unlock_user (dev_ifname, optval_addr, 0);
3292 		return ret;
3293 	}
3294             /* Options with 'int' argument.  */
3295         case TARGET_SO_DEBUG:
3296 		optname = SO_DEBUG;
3297 		break;
3298         case TARGET_SO_REUSEADDR:
3299 		optname = SO_REUSEADDR;
3300 		break;
3301         case TARGET_SO_TYPE:
3302 		optname = SO_TYPE;
3303 		break;
3304         case TARGET_SO_ERROR:
3305 		optname = SO_ERROR;
3306 		break;
3307         case TARGET_SO_DONTROUTE:
3308 		optname = SO_DONTROUTE;
3309 		break;
3310         case TARGET_SO_BROADCAST:
3311 		optname = SO_BROADCAST;
3312 		break;
3313         case TARGET_SO_SNDBUF:
3314 		optname = SO_SNDBUF;
3315 		break;
3316         case TARGET_SO_SNDBUFFORCE:
3317                 optname = SO_SNDBUFFORCE;
3318                 break;
3319         case TARGET_SO_RCVBUF:
3320 		optname = SO_RCVBUF;
3321 		break;
3322         case TARGET_SO_RCVBUFFORCE:
3323                 optname = SO_RCVBUFFORCE;
3324                 break;
3325         case TARGET_SO_KEEPALIVE:
3326 		optname = SO_KEEPALIVE;
3327 		break;
3328         case TARGET_SO_OOBINLINE:
3329 		optname = SO_OOBINLINE;
3330 		break;
3331         case TARGET_SO_NO_CHECK:
3332 		optname = SO_NO_CHECK;
3333 		break;
3334         case TARGET_SO_PRIORITY:
3335 		optname = SO_PRIORITY;
3336 		break;
3337 #ifdef SO_BSDCOMPAT
3338         case TARGET_SO_BSDCOMPAT:
3339 		optname = SO_BSDCOMPAT;
3340 		break;
3341 #endif
3342         case TARGET_SO_PASSCRED:
3343 		optname = SO_PASSCRED;
3344 		break;
3345         case TARGET_SO_PASSSEC:
3346                 optname = SO_PASSSEC;
3347                 break;
3348         case TARGET_SO_TIMESTAMP:
3349 		optname = SO_TIMESTAMP;
3350 		break;
3351         case TARGET_SO_RCVLOWAT:
3352 		optname = SO_RCVLOWAT;
3353 		break;
3354         default:
3355             goto unimplemented;
3356         }
3357 	if (optlen < sizeof(uint32_t))
3358             return -TARGET_EINVAL;
3359 
3360 	if (get_user_u32(val, optval_addr))
3361             return -TARGET_EFAULT;
3362 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3363         break;
3364     default:
3365     unimplemented:
3366         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3367         ret = -TARGET_ENOPROTOOPT;
3368     }
3369     return ret;
3370 }
3371 
3372 /* do_getsockopt() must return target values and target errnos. */
3373 static abi_long do_getsockopt(int sockfd, int level, int optname,
3374                               abi_ulong optval_addr, abi_ulong optlen)
3375 {
3376     abi_long ret;
3377     int len, val;
3378     socklen_t lv;
3379 
3380     switch(level) {
3381     case TARGET_SOL_SOCKET:
3382         level = SOL_SOCKET;
3383         switch (optname) {
3384         /* These don't just return a single integer */
3385         case TARGET_SO_LINGER:
3386         case TARGET_SO_RCVTIMEO:
3387         case TARGET_SO_SNDTIMEO:
3388         case TARGET_SO_PEERNAME:
3389             goto unimplemented;
3390         case TARGET_SO_PEERCRED: {
3391             struct ucred cr;
3392             socklen_t crlen;
3393             struct target_ucred *tcr;
3394 
3395             if (get_user_u32(len, optlen)) {
3396                 return -TARGET_EFAULT;
3397             }
3398             if (len < 0) {
3399                 return -TARGET_EINVAL;
3400             }
3401 
3402             crlen = sizeof(cr);
3403             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3404                                        &cr, &crlen));
3405             if (ret < 0) {
3406                 return ret;
3407             }
3408             if (len > crlen) {
3409                 len = crlen;
3410             }
3411             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3412                 return -TARGET_EFAULT;
3413             }
3414             __put_user(cr.pid, &tcr->pid);
3415             __put_user(cr.uid, &tcr->uid);
3416             __put_user(cr.gid, &tcr->gid);
3417             unlock_user_struct(tcr, optval_addr, 1);
3418             if (put_user_u32(len, optlen)) {
3419                 return -TARGET_EFAULT;
3420             }
3421             break;
3422         }
3423         /* Options with 'int' argument.  */
3424         case TARGET_SO_DEBUG:
3425             optname = SO_DEBUG;
3426             goto int_case;
3427         case TARGET_SO_REUSEADDR:
3428             optname = SO_REUSEADDR;
3429             goto int_case;
3430         case TARGET_SO_TYPE:
3431             optname = SO_TYPE;
3432             goto int_case;
3433         case TARGET_SO_ERROR:
3434             optname = SO_ERROR;
3435             goto int_case;
3436         case TARGET_SO_DONTROUTE:
3437             optname = SO_DONTROUTE;
3438             goto int_case;
3439         case TARGET_SO_BROADCAST:
3440             optname = SO_BROADCAST;
3441             goto int_case;
3442         case TARGET_SO_SNDBUF:
3443             optname = SO_SNDBUF;
3444             goto int_case;
3445         case TARGET_SO_RCVBUF:
3446             optname = SO_RCVBUF;
3447             goto int_case;
3448         case TARGET_SO_KEEPALIVE:
3449             optname = SO_KEEPALIVE;
3450             goto int_case;
3451         case TARGET_SO_OOBINLINE:
3452             optname = SO_OOBINLINE;
3453             goto int_case;
3454         case TARGET_SO_NO_CHECK:
3455             optname = SO_NO_CHECK;
3456             goto int_case;
3457         case TARGET_SO_PRIORITY:
3458             optname = SO_PRIORITY;
3459             goto int_case;
3460 #ifdef SO_BSDCOMPAT
3461         case TARGET_SO_BSDCOMPAT:
3462             optname = SO_BSDCOMPAT;
3463             goto int_case;
3464 #endif
3465         case TARGET_SO_PASSCRED:
3466             optname = SO_PASSCRED;
3467             goto int_case;
3468         case TARGET_SO_TIMESTAMP:
3469             optname = SO_TIMESTAMP;
3470             goto int_case;
3471         case TARGET_SO_RCVLOWAT:
3472             optname = SO_RCVLOWAT;
3473             goto int_case;
3474         case TARGET_SO_ACCEPTCONN:
3475             optname = SO_ACCEPTCONN;
3476             goto int_case;
3477         default:
3478             goto int_case;
3479         }
3480         break;
3481     case SOL_TCP:
3482         /* TCP options all take an 'int' value.  */
3483     int_case:
3484         if (get_user_u32(len, optlen))
3485             return -TARGET_EFAULT;
3486         if (len < 0)
3487             return -TARGET_EINVAL;
3488         lv = sizeof(lv);
3489         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3490         if (ret < 0)
3491             return ret;
3492         if (optname == SO_TYPE) {
3493             val = host_to_target_sock_type(val);
3494         }
3495         if (len > lv)
3496             len = lv;
3497         if (len == 4) {
3498             if (put_user_u32(val, optval_addr))
3499                 return -TARGET_EFAULT;
3500         } else {
3501             if (put_user_u8(val, optval_addr))
3502                 return -TARGET_EFAULT;
3503         }
3504         if (put_user_u32(len, optlen))
3505             return -TARGET_EFAULT;
3506         break;
3507     case SOL_IP:
3508         switch(optname) {
3509         case IP_TOS:
3510         case IP_TTL:
3511         case IP_HDRINCL:
3512         case IP_ROUTER_ALERT:
3513         case IP_RECVOPTS:
3514         case IP_RETOPTS:
3515         case IP_PKTINFO:
3516         case IP_MTU_DISCOVER:
3517         case IP_RECVERR:
3518         case IP_RECVTOS:
3519 #ifdef IP_FREEBIND
3520         case IP_FREEBIND:
3521 #endif
3522         case IP_MULTICAST_TTL:
3523         case IP_MULTICAST_LOOP:
3524             if (get_user_u32(len, optlen))
3525                 return -TARGET_EFAULT;
3526             if (len < 0)
3527                 return -TARGET_EINVAL;
3528             lv = sizeof(lv);
3529             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3530             if (ret < 0)
3531                 return ret;
3532             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3533                 len = 1;
3534                 if (put_user_u32(len, optlen)
3535                     || put_user_u8(val, optval_addr))
3536                     return -TARGET_EFAULT;
3537             } else {
3538                 if (len > sizeof(int))
3539                     len = sizeof(int);
3540                 if (put_user_u32(len, optlen)
3541                     || put_user_u32(val, optval_addr))
3542                     return -TARGET_EFAULT;
3543             }
3544             break;
3545         default:
3546             ret = -TARGET_ENOPROTOOPT;
3547             break;
3548         }
3549         break;
3550     default:
3551     unimplemented:
3552         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3553                  level, optname);
3554         ret = -TARGET_EOPNOTSUPP;
3555         break;
3556     }
3557     return ret;
3558 }
3559 
3560 /* Convert a target low/high pair representing a file offset into the host
3561  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
3562  * as the kernel doesn't handle them either.
3563  */
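/* Note on the shifts below: the 64-bit offset is assembled and split with
 * two half-width shifts rather than one full-width shift, so the
 * expressions stay well defined when TARGET_LONG_BITS or HOST_LONG_BITS
 * is 64 (shifting by the full width of the type would be undefined
 * behaviour).  This appears to mirror the kernel's pos_from_hilo() helper.
 */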
3564 static void target_to_host_low_high(abi_ulong tlow,
3565                                     abi_ulong thigh,
3566                                     unsigned long *hlow,
3567                                     unsigned long *hhigh)
3568 {
3569     uint64_t off = tlow |
3570         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3571         TARGET_LONG_BITS / 2;
3572 
3573     *hlow = off;
3574     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3575 }
3576 
3577 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3578                                 abi_ulong count, int copy)
3579 {
3580     struct target_iovec *target_vec;
3581     struct iovec *vec;
3582     abi_ulong total_len, max_len;
3583     int i;
3584     int err = 0;
3585     bool bad_address = false;
3586 
3587     if (count == 0) {
3588         errno = 0;
3589         return NULL;
3590     }
3591     if (count > IOV_MAX) {
3592         errno = EINVAL;
3593         return NULL;
3594     }
3595 
3596     vec = g_try_new0(struct iovec, count);
3597     if (vec == NULL) {
3598         errno = ENOMEM;
3599         return NULL;
3600     }
3601 
3602     target_vec = lock_user(VERIFY_READ, target_addr,
3603                            count * sizeof(struct target_iovec), 1);
3604     if (target_vec == NULL) {
3605         err = EFAULT;
3606         goto fail2;
3607     }
3608 
3609     /* ??? If host page size > target page size, this will result in a
3610        value larger than what we can actually support.  */
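    /* The clamp below looks like the kernel's MAX_RW_COUNT
     * (INT_MAX & PAGE_MASK) limit on the total size of a readv/writev,
     * applied here with the target's page mask.
     */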
3611     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3612     total_len = 0;
3613 
3614     for (i = 0; i < count; i++) {
3615         abi_ulong base = tswapal(target_vec[i].iov_base);
3616         abi_long len = tswapal(target_vec[i].iov_len);
3617 
3618         if (len < 0) {
3619             err = EINVAL;
3620             goto fail;
3621         } else if (len == 0) {
3622             /* Zero-length entries are ignored.  */
3623             vec[i].iov_base = 0;
3624         } else {
3625             vec[i].iov_base = lock_user(type, base, len, copy);
3626             /* If the first buffer pointer is bad, this is a fault.  But
3627              * subsequent bad buffers will result in a partial write; this
3628              * is realized by filling the vector with null pointers and
3629              * zero lengths. */
3630             if (!vec[i].iov_base) {
3631                 if (i == 0) {
3632                     err = EFAULT;
3633                     goto fail;
3634                 } else {
3635                     bad_address = true;
3636                 }
3637             }
3638             if (bad_address) {
3639                 len = 0;
3640             }
3641             if (len > max_len - total_len) {
3642                 len = max_len - total_len;
3643             }
3644         }
3645         vec[i].iov_len = len;
3646         total_len += len;
3647     }
3648 
3649     unlock_user(target_vec, target_addr, 0);
3650     return vec;
3651 
3652  fail:
3653     while (--i >= 0) {
3654         if (tswapal(target_vec[i].iov_len) > 0) {
3655             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3656         }
3657     }
3658     unlock_user(target_vec, target_addr, 0);
3659  fail2:
3660     g_free(vec);
3661     errno = err;
3662     return NULL;
3663 }
3664 
3665 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3666                          abi_ulong count, int copy)
3667 {
3668     struct target_iovec *target_vec;
3669     int i;
3670 
3671     target_vec = lock_user(VERIFY_READ, target_addr,
3672                            count * sizeof(struct target_iovec), 1);
3673     if (target_vec) {
3674         for (i = 0; i < count; i++) {
3675             abi_ulong base = tswapal(target_vec[i].iov_base);
3676             abi_long len = tswapal(target_vec[i].iov_len);
3677             if (len < 0) {
3678                 break;
3679             }
3680             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3681         }
3682         unlock_user(target_vec, target_addr, 0);
3683     }
3684 
3685     g_free(vec);
3686 }
3687 
3688 static inline int target_to_host_sock_type(int *type)
3689 {
3690     int host_type = 0;
3691     int target_type = *type;
3692 
3693     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3694     case TARGET_SOCK_DGRAM:
3695         host_type = SOCK_DGRAM;
3696         break;
3697     case TARGET_SOCK_STREAM:
3698         host_type = SOCK_STREAM;
3699         break;
3700     default:
3701         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3702         break;
3703     }
3704     if (target_type & TARGET_SOCK_CLOEXEC) {
3705 #if defined(SOCK_CLOEXEC)
3706         host_type |= SOCK_CLOEXEC;
3707 #else
3708         return -TARGET_EINVAL;
3709 #endif
3710     }
3711     if (target_type & TARGET_SOCK_NONBLOCK) {
3712 #if defined(SOCK_NONBLOCK)
3713         host_type |= SOCK_NONBLOCK;
3714 #elif !defined(O_NONBLOCK)
3715         return -TARGET_EINVAL;
3716 #endif
3717     }
3718     *type = host_type;
3719     return 0;
3720 }
3721 
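/* Worked example for the helper above and sock_flags_fixup() below: on a
 * host that lacks SOCK_NONBLOCK but has O_NONBLOCK, target_to_host_sock_type()
 * silently drops TARGET_SOCK_NONBLOCK from the type passed to socket(), and
 * sock_flags_fixup() then applies O_NONBLOCK with fcntl(F_SETFL) once the
 * socket exists.
 */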
3722 /* Try to emulate socket type flags after socket creation.  */
3723 static int sock_flags_fixup(int fd, int target_type)
3724 {
3725 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3726     if (target_type & TARGET_SOCK_NONBLOCK) {
3727         int flags = fcntl(fd, F_GETFL);
3728         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3729             close(fd);
3730             return -TARGET_EINVAL;
3731         }
3732     }
3733 #endif
3734     return fd;
3735 }
3736 
3737 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3738                                                abi_ulong target_addr,
3739                                                socklen_t len)
3740 {
3741     struct sockaddr *addr = host_addr;
3742     struct target_sockaddr *target_saddr;
3743 
3744     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3745     if (!target_saddr) {
3746         return -TARGET_EFAULT;
3747     }
3748 
3749     memcpy(addr, target_saddr, len);
3750     addr->sa_family = tswap16(target_saddr->sa_family);
3751     /* spkt_protocol is big-endian */
3752 
3753     unlock_user(target_saddr, target_addr, 0);
3754     return 0;
3755 }
3756 
3757 static TargetFdTrans target_packet_trans = {
3758     .target_to_host_addr = packet_target_to_host_sockaddr,
3759 };
3760 
3761 #ifdef CONFIG_RTNETLINK
3762 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3763 {
3764     abi_long ret;
3765 
3766     ret = target_to_host_nlmsg_route(buf, len);
3767     if (ret < 0) {
3768         return ret;
3769     }
3770 
3771     return len;
3772 }
3773 
3774 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3775 {
3776     abi_long ret;
3777 
3778     ret = host_to_target_nlmsg_route(buf, len);
3779     if (ret < 0) {
3780         return ret;
3781     }
3782 
3783     return len;
3784 }
3785 
3786 static TargetFdTrans target_netlink_route_trans = {
3787     .target_to_host_data = netlink_route_target_to_host,
3788     .host_to_target_data = netlink_route_host_to_target,
3789 };
3790 #endif /* CONFIG_RTNETLINK */
3791 
3792 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3793 {
3794     abi_long ret;
3795 
3796     ret = target_to_host_nlmsg_audit(buf, len);
3797     if (ret < 0) {
3798         return ret;
3799     }
3800 
3801     return len;
3802 }
3803 
3804 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3805 {
3806     abi_long ret;
3807 
3808     ret = host_to_target_nlmsg_audit(buf, len);
3809     if (ret < 0) {
3810         return ret;
3811     }
3812 
3813     return len;
3814 }
3815 
3816 static TargetFdTrans target_netlink_audit_trans = {
3817     .target_to_host_data = netlink_audit_target_to_host,
3818     .host_to_target_data = netlink_audit_host_to_target,
3819 };
3820 
3821 /* do_socket() must return target values and target errnos. */
3822 static abi_long do_socket(int domain, int type, int protocol)
3823 {
3824     int target_type = type;
3825     int ret;
3826 
3827     ret = target_to_host_sock_type(&type);
3828     if (ret) {
3829         return ret;
3830     }
3831 
3832     if (domain == PF_NETLINK && !(
3833 #ifdef CONFIG_RTNETLINK
3834          protocol == NETLINK_ROUTE ||
3835 #endif
3836          protocol == NETLINK_KOBJECT_UEVENT ||
3837          protocol == NETLINK_AUDIT)) {
3838         return -EPFNOSUPPORT;
3839     }
3840 
3841     if (domain == AF_PACKET ||
3842         (domain == AF_INET && type == SOCK_PACKET)) {
3843         protocol = tswap16(protocol);
3844     }
3845 
3846     ret = get_errno(socket(domain, type, protocol));
3847     if (ret >= 0) {
3848         ret = sock_flags_fixup(ret, target_type);
3849         if (type == SOCK_PACKET) {
3850             /* Handle an obsolete case: if the socket type is
3851              * SOCK_PACKET, bind by name.
3852              */
3853             fd_trans_register(ret, &target_packet_trans);
3854         } else if (domain == PF_NETLINK) {
3855             switch (protocol) {
3856 #ifdef CONFIG_RTNETLINK
3857             case NETLINK_ROUTE:
3858                 fd_trans_register(ret, &target_netlink_route_trans);
3859                 break;
3860 #endif
3861             case NETLINK_KOBJECT_UEVENT:
3862                 /* nothing to do: messages are strings */
3863                 break;
3864             case NETLINK_AUDIT:
3865                 fd_trans_register(ret, &target_netlink_audit_trans);
3866                 break;
3867             default:
3868                 g_assert_not_reached();
3869             }
3870         }
3871     }
3872     return ret;
3873 }
3874 
3875 /* do_bind() must return target values and target errnos. */
3876 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3877                         socklen_t addrlen)
3878 {
3879     void *addr;
3880     abi_long ret;
3881 
3882     if ((int)addrlen < 0) {
3883         return -TARGET_EINVAL;
3884     }
3885 
3886     addr = alloca(addrlen+1);
3887 
3888     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3889     if (ret)
3890         return ret;
3891 
3892     return get_errno(bind(sockfd, addr, addrlen));
3893 }
3894 
3895 /* do_connect() must return target values and target errnos. */
3896 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3897                            socklen_t addrlen)
3898 {
3899     void *addr;
3900     abi_long ret;
3901 
3902     if ((int)addrlen < 0) {
3903         return -TARGET_EINVAL;
3904     }
3905 
3906     addr = alloca(addrlen+1);
3907 
3908     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3909     if (ret)
3910         return ret;
3911 
3912     return get_errno(safe_connect(sockfd, addr, addrlen));
3913 }
3914 
3915 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3916 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3917                                       int flags, int send)
3918 {
3919     abi_long ret, len;
3920     struct msghdr msg;
3921     abi_ulong count;
3922     struct iovec *vec;
3923     abi_ulong target_vec;
3924 
3925     if (msgp->msg_name) {
3926         msg.msg_namelen = tswap32(msgp->msg_namelen);
3927         msg.msg_name = alloca(msg.msg_namelen+1);
3928         ret = target_to_host_sockaddr(fd, msg.msg_name,
3929                                       tswapal(msgp->msg_name),
3930                                       msg.msg_namelen);
3931         if (ret == -TARGET_EFAULT) {
3932             /* For connected sockets msg_name and msg_namelen must
3933              * be ignored, so returning EFAULT immediately is wrong.
3934              * Instead, pass a bad msg_name to the host kernel, and
3935              * let it decide whether to return EFAULT or not.
3936              */
3937             msg.msg_name = (void *)-1;
3938         } else if (ret) {
3939             goto out2;
3940         }
3941     } else {
3942         msg.msg_name = NULL;
3943         msg.msg_namelen = 0;
3944     }
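    /* The control buffer is sized at twice the target's msg_controllen,
     * presumably because host cmsg headers and alignment can be larger
     * than the target's, so the converted control data may grow.
     */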
3945     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3946     msg.msg_control = alloca(msg.msg_controllen);
3947     memset(msg.msg_control, 0, msg.msg_controllen);
3948 
3949     msg.msg_flags = tswap32(msgp->msg_flags);
3950 
3951     count = tswapal(msgp->msg_iovlen);
3952     target_vec = tswapal(msgp->msg_iov);
3953 
3954     if (count > IOV_MAX) {
3955         /* sendmsg/recvmsg return a different errno for this condition than
3956          * readv/writev do, so we must catch it here before lock_iovec() does.
3957          */
3958         ret = -TARGET_EMSGSIZE;
3959         goto out2;
3960     }
3961 
3962     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3963                      target_vec, count, send);
3964     if (vec == NULL) {
3965         ret = -host_to_target_errno(errno);
3966         goto out2;
3967     }
3968     msg.msg_iovlen = count;
3969     msg.msg_iov = vec;
3970 
3971     if (send) {
3972         if (fd_trans_target_to_host_data(fd)) {
3973             void *host_msg;
3974 
3975             host_msg = g_malloc(msg.msg_iov->iov_len);
3976             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3977             ret = fd_trans_target_to_host_data(fd)(host_msg,
3978                                                    msg.msg_iov->iov_len);
3979             if (ret >= 0) {
3980                 msg.msg_iov->iov_base = host_msg;
3981                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3982             }
3983             g_free(host_msg);
3984         } else {
3985             ret = target_to_host_cmsg(&msg, msgp);
3986             if (ret == 0) {
3987                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3988             }
3989         }
3990     } else {
3991         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3992         if (!is_error(ret)) {
3993             len = ret;
3994             if (fd_trans_host_to_target_data(fd)) {
3995                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3996                                                MIN(msg.msg_iov->iov_len, len));
3997             } else {
3998                 ret = host_to_target_cmsg(msgp, &msg);
3999             }
4000             if (!is_error(ret)) {
4001                 msgp->msg_namelen = tswap32(msg.msg_namelen);
4002                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
4003                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
4004                                     msg.msg_name, msg.msg_namelen);
4005                     if (ret) {
4006                         goto out;
4007                     }
4008                 }
4009 
4010                 ret = len;
4011             }
4012         }
4013     }
4014 
4015 out:
4016     unlock_iovec(vec, target_vec, count, !send);
4017 out2:
4018     return ret;
4019 }
4020 
4021 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
4022                                int flags, int send)
4023 {
4024     abi_long ret;
4025     struct target_msghdr *msgp;
4026 
4027     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
4028                           msgp,
4029                           target_msg,
4030                           send ? 1 : 0)) {
4031         return -TARGET_EFAULT;
4032     }
4033     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
4034     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
4035     return ret;
4036 }
4037 
4038 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
4039  * so it might not have this *mmsg-specific flag either.
4040  */
4041 #ifndef MSG_WAITFORONE
4042 #define MSG_WAITFORONE 0x10000
4043 #endif
4044 
4045 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
4046                                 unsigned int vlen, unsigned int flags,
4047                                 int send)
4048 {
4049     struct target_mmsghdr *mmsgp;
4050     abi_long ret = 0;
4051     int i;
4052 
4053     if (vlen > UIO_MAXIOV) {
4054         vlen = UIO_MAXIOV;
4055     }
4056 
4057     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
4058     if (!mmsgp) {
4059         return -TARGET_EFAULT;
4060     }
4061 
4062     for (i = 0; i < vlen; i++) {
4063         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
4064         if (is_error(ret)) {
4065             break;
4066         }
4067         mmsgp[i].msg_len = tswap32(ret);
4068         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
4069         if (flags & MSG_WAITFORONE) {
4070             flags |= MSG_DONTWAIT;
4071         }
4072     }
4073 
4074     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
4075 
4076     /* Return the number of datagrams sent or received if we transferred
4077      * any at all; otherwise return the error.
4078      */
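    /* For example, if the third of five messages fails with EAGAIN, the
     * loop above breaks with i == 2: the caller gets 2 back and the error
     * from do_sendrecvmsg_locked() is dropped in favour of the partial
     * count.
     */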
4079     if (i) {
4080         return i;
4081     }
4082     return ret;
4083 }
4084 
4085 /* do_accept4() must return target values and target errnos. */
4086 static abi_long do_accept4(int fd, abi_ulong target_addr,
4087                            abi_ulong target_addrlen_addr, int flags)
4088 {
4089     socklen_t addrlen;
4090     void *addr;
4091     abi_long ret;
4092     int host_flags;
4093 
4094     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
4095 
4096     if (target_addr == 0) {
4097         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
4098     }
4099 
4100     /* Linux returns EINVAL if the addrlen pointer is invalid. */
4101     if (get_user_u32(addrlen, target_addrlen_addr))
4102         return -TARGET_EINVAL;
4103 
4104     if ((int)addrlen < 0) {
4105         return -TARGET_EINVAL;
4106     }
4107 
4108     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4109         return -TARGET_EINVAL;
4110 
4111     addr = alloca(addrlen);
4112 
4113     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4114     if (!is_error(ret)) {
4115         host_to_target_sockaddr(target_addr, addr, addrlen);
4116         if (put_user_u32(addrlen, target_addrlen_addr))
4117             ret = -TARGET_EFAULT;
4118     }
4119     return ret;
4120 }
4121 
4122 /* do_getpeername() must return target values and target errnos. */
4123 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4124                                abi_ulong target_addrlen_addr)
4125 {
4126     socklen_t addrlen;
4127     void *addr;
4128     abi_long ret;
4129 
4130     if (get_user_u32(addrlen, target_addrlen_addr))
4131         return -TARGET_EFAULT;
4132 
4133     if ((int)addrlen < 0) {
4134         return -TARGET_EINVAL;
4135     }
4136 
4137     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4138         return -TARGET_EFAULT;
4139 
4140     addr = alloca(addrlen);
4141 
4142     ret = get_errno(getpeername(fd, addr, &addrlen));
4143     if (!is_error(ret)) {
4144         host_to_target_sockaddr(target_addr, addr, addrlen);
4145         if (put_user_u32(addrlen, target_addrlen_addr))
4146             ret = -TARGET_EFAULT;
4147     }
4148     return ret;
4149 }
4150 
4151 /* do_getsockname() must return target values and target errnos. */
4152 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4153                                abi_ulong target_addrlen_addr)
4154 {
4155     socklen_t addrlen;
4156     void *addr;
4157     abi_long ret;
4158 
4159     if (get_user_u32(addrlen, target_addrlen_addr))
4160         return -TARGET_EFAULT;
4161 
4162     if ((int)addrlen < 0) {
4163         return -TARGET_EINVAL;
4164     }
4165 
4166     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4167         return -TARGET_EFAULT;
4168 
4169     addr = alloca(addrlen);
4170 
4171     ret = get_errno(getsockname(fd, addr, &addrlen));
4172     if (!is_error(ret)) {
4173         host_to_target_sockaddr(target_addr, addr, addrlen);
4174         if (put_user_u32(addrlen, target_addrlen_addr))
4175             ret = -TARGET_EFAULT;
4176     }
4177     return ret;
4178 }
4179 
4180 /* do_socketpair() must return target values and target errnos. */
4181 static abi_long do_socketpair(int domain, int type, int protocol,
4182                               abi_ulong target_tab_addr)
4183 {
4184     int tab[2];
4185     abi_long ret;
4186 
4187     target_to_host_sock_type(&type);
4188 
4189     ret = get_errno(socketpair(domain, type, protocol, tab));
4190     if (!is_error(ret)) {
4191         if (put_user_s32(tab[0], target_tab_addr)
4192             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4193             ret = -TARGET_EFAULT;
4194     }
4195     return ret;
4196 }
4197 
4198 /* do_sendto() must return target values and target errnos. */
4199 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4200                           abi_ulong target_addr, socklen_t addrlen)
4201 {
4202     void *addr;
4203     void *host_msg;
4204     void *copy_msg = NULL;
4205     abi_long ret;
4206 
4207     if ((int)addrlen < 0) {
4208         return -TARGET_EINVAL;
4209     }
4210 
4211     host_msg = lock_user(VERIFY_READ, msg, len, 1);
4212     if (!host_msg)
4213         return -TARGET_EFAULT;
4214     if (fd_trans_target_to_host_data(fd)) {
4215         copy_msg = host_msg;
4216         host_msg = g_malloc(len);
4217         memcpy(host_msg, copy_msg, len);
4218         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4219         if (ret < 0) {
4220             goto fail;
4221         }
4222     }
4223     if (target_addr) {
4224         addr = alloca(addrlen+1);
4225         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4226         if (ret) {
4227             goto fail;
4228         }
4229         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4230     } else {
4231         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4232     }
4233 fail:
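    /* If a converted copy was substituted for the locked guest buffer,
     * free it and restore the original pointer so that unlock_user()
     * below releases the right mapping.
     */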
4234     if (copy_msg) {
4235         g_free(host_msg);
4236         host_msg = copy_msg;
4237     }
4238     unlock_user(host_msg, msg, 0);
4239     return ret;
4240 }
4241 
4242 /* do_recvfrom() must return target values and target errnos. */
4243 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4244                             abi_ulong target_addr,
4245                             abi_ulong target_addrlen)
4246 {
4247     socklen_t addrlen;
4248     void *addr;
4249     void *host_msg;
4250     abi_long ret;
4251 
4252     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4253     if (!host_msg)
4254         return -TARGET_EFAULT;
4255     if (target_addr) {
4256         if (get_user_u32(addrlen, target_addrlen)) {
4257             ret = -TARGET_EFAULT;
4258             goto fail;
4259         }
4260         if ((int)addrlen < 0) {
4261             ret = -TARGET_EINVAL;
4262             goto fail;
4263         }
4264         addr = alloca(addrlen);
4265         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4266                                       addr, &addrlen));
4267     } else {
4268         addr = NULL; /* To keep compiler quiet.  */
4269         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4270     }
4271     if (!is_error(ret)) {
4272         if (fd_trans_host_to_target_data(fd)) {
4273             abi_long trans;
4274             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
4275             if (is_error(trans)) {
4276                 ret = trans;
4277                 goto fail;
4278             }
4279         }
4280         if (target_addr) {
4281             host_to_target_sockaddr(target_addr, addr, addrlen);
4282             if (put_user_u32(addrlen, target_addrlen)) {
4283                 ret = -TARGET_EFAULT;
4284                 goto fail;
4285             }
4286         }
4287         unlock_user(host_msg, msg, len);
4288     } else {
4289 fail:
4290         unlock_user(host_msg, msg, 0);
4291     }
4292     return ret;
4293 }
4294 
4295 #ifdef TARGET_NR_socketcall
4296 /* do_socketcall() must return target values and target errnos. */
4297 static abi_long do_socketcall(int num, abi_ulong vptr)
4298 {
4299     static const unsigned nargs[] = { /* number of arguments per operation */
4300         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
4301         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
4302         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
4303         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
4304         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
4305         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4306         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4307         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
4308         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
4309         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
4310         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
4311         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
4312         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
4313         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4314         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4315         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
4316         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
4317         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
4318         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
4319         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
4320     };
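    /* Example: for num == TARGET_SYS_SENDTO, nargs[num] is 6, so six
     * abi_longs are loaded from vptr and handed to do_sendto() as
     * fd, msg, len, flags, addr and addrlen.
     */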
4321     abi_long a[6]; /* max 6 args */
4322     unsigned i;
4323 
4324     /* check the range of the first argument num */
4325     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4326     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4327         return -TARGET_EINVAL;
4328     }
4329     /* ensure we have space for args */
4330     if (nargs[num] > ARRAY_SIZE(a)) {
4331         return -TARGET_EINVAL;
4332     }
4333     /* collect the arguments in a[] according to nargs[] */
4334     for (i = 0; i < nargs[num]; ++i) {
4335         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4336             return -TARGET_EFAULT;
4337         }
4338     }
4339     /* now that we have the args, invoke the appropriate underlying function */
4340     switch (num) {
4341     case TARGET_SYS_SOCKET: /* domain, type, protocol */
4342         return do_socket(a[0], a[1], a[2]);
4343     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4344         return do_bind(a[0], a[1], a[2]);
4345     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4346         return do_connect(a[0], a[1], a[2]);
4347     case TARGET_SYS_LISTEN: /* sockfd, backlog */
4348         return get_errno(listen(a[0], a[1]));
4349     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4350         return do_accept4(a[0], a[1], a[2], 0);
4351     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4352         return do_getsockname(a[0], a[1], a[2]);
4353     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4354         return do_getpeername(a[0], a[1], a[2]);
4355     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4356         return do_socketpair(a[0], a[1], a[2], a[3]);
4357     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4358         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4359     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4360         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4361     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4362         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4363     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4364         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4365     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4366         return get_errno(shutdown(a[0], a[1]));
4367     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4368         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4369     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4370         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4371     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4372         return do_sendrecvmsg(a[0], a[1], a[2], 1);
4373     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4374         return do_sendrecvmsg(a[0], a[1], a[2], 0);
4375     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4376         return do_accept4(a[0], a[1], a[2], a[3]);
4377     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4378         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4379     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4380         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4381     default:
4382         gemu_log("Unsupported socketcall: %d\n", num);
4383         return -TARGET_EINVAL;
4384     }
4385 }
4386 #endif
4387 
4388 #define N_SHM_REGIONS	32
4389 
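/* Book-keeping for the SysV shared memory emulation: each slot records
 * where a segment was attached in the target address space and how large
 * it is, so that the shmdt() path further down can find the right region
 * to unmap.
 */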
4390 static struct shm_region {
4391     abi_ulong start;
4392     abi_ulong size;
4393     bool in_use;
4394 } shm_regions[N_SHM_REGIONS];
4395 
4396 #ifndef TARGET_SEMID64_DS
4397 /* asm-generic version of this struct */
4398 struct target_semid64_ds
4399 {
4400   struct target_ipc_perm sem_perm;
4401   abi_ulong sem_otime;
4402 #if TARGET_ABI_BITS == 32
4403   abi_ulong __unused1;
4404 #endif
4405   abi_ulong sem_ctime;
4406 #if TARGET_ABI_BITS == 32
4407   abi_ulong __unused2;
4408 #endif
4409   abi_ulong sem_nsems;
4410   abi_ulong __unused3;
4411   abi_ulong __unused4;
4412 };
4413 #endif
4414 
4415 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4416                                                abi_ulong target_addr)
4417 {
4418     struct target_ipc_perm *target_ip;
4419     struct target_semid64_ds *target_sd;
4420 
4421     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4422         return -TARGET_EFAULT;
4423     target_ip = &(target_sd->sem_perm);
4424     host_ip->__key = tswap32(target_ip->__key);
4425     host_ip->uid = tswap32(target_ip->uid);
4426     host_ip->gid = tswap32(target_ip->gid);
4427     host_ip->cuid = tswap32(target_ip->cuid);
4428     host_ip->cgid = tswap32(target_ip->cgid);
4429 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4430     host_ip->mode = tswap32(target_ip->mode);
4431 #else
4432     host_ip->mode = tswap16(target_ip->mode);
4433 #endif
4434 #if defined(TARGET_PPC)
4435     host_ip->__seq = tswap32(target_ip->__seq);
4436 #else
4437     host_ip->__seq = tswap16(target_ip->__seq);
4438 #endif
4439     unlock_user_struct(target_sd, target_addr, 0);
4440     return 0;
4441 }
4442 
4443 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4444                                                struct ipc_perm *host_ip)
4445 {
4446     struct target_ipc_perm *target_ip;
4447     struct target_semid64_ds *target_sd;
4448 
4449     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4450         return -TARGET_EFAULT;
4451     target_ip = &(target_sd->sem_perm);
4452     target_ip->__key = tswap32(host_ip->__key);
4453     target_ip->uid = tswap32(host_ip->uid);
4454     target_ip->gid = tswap32(host_ip->gid);
4455     target_ip->cuid = tswap32(host_ip->cuid);
4456     target_ip->cgid = tswap32(host_ip->cgid);
4457 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4458     target_ip->mode = tswap32(host_ip->mode);
4459 #else
4460     target_ip->mode = tswap16(host_ip->mode);
4461 #endif
4462 #if defined(TARGET_PPC)
4463     target_ip->__seq = tswap32(host_ip->__seq);
4464 #else
4465     target_ip->__seq = tswap16(host_ip->__seq);
4466 #endif
4467     unlock_user_struct(target_sd, target_addr, 1);
4468     return 0;
4469 }
4470 
4471 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4472                                                abi_ulong target_addr)
4473 {
4474     struct target_semid64_ds *target_sd;
4475 
4476     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4477         return -TARGET_EFAULT;
4478     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4479         return -TARGET_EFAULT;
4480     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4481     host_sd->sem_otime = tswapal(target_sd->sem_otime);
4482     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4483     unlock_user_struct(target_sd, target_addr, 0);
4484     return 0;
4485 }
4486 
4487 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4488                                                struct semid_ds *host_sd)
4489 {
4490     struct target_semid64_ds *target_sd;
4491 
4492     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4493         return -TARGET_EFAULT;
4494     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4495         return -TARGET_EFAULT;
4496     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4497     target_sd->sem_otime = tswapal(host_sd->sem_otime);
4498     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4499     unlock_user_struct(target_sd, target_addr, 1);
4500     return 0;
4501 }
4502 
4503 struct target_seminfo {
4504     int semmap;
4505     int semmni;
4506     int semmns;
4507     int semmnu;
4508     int semmsl;
4509     int semopm;
4510     int semume;
4511     int semusz;
4512     int semvmx;
4513     int semaem;
4514 };
4515 
4516 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4517                                               struct seminfo *host_seminfo)
4518 {
4519     struct target_seminfo *target_seminfo;
4520     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4521         return -TARGET_EFAULT;
4522     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4523     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4524     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4525     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4526     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4527     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4528     __put_user(host_seminfo->semume, &target_seminfo->semume);
4529     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4530     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4531     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4532     unlock_user_struct(target_seminfo, target_addr, 1);
4533     return 0;
4534 }
4535 
4536 union semun {
4537 	int val;
4538 	struct semid_ds *buf;
4539 	unsigned short *array;
4540 	struct seminfo *__buf;
4541 };
4542 
4543 union target_semun {
4544 	int val;
4545 	abi_ulong buf;
4546 	abi_ulong array;
4547 	abi_ulong __buf;
4548 };
4549 
4550 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4551                                                abi_ulong target_addr)
4552 {
4553     int nsems;
4554     unsigned short *array;
4555     union semun semun;
4556     struct semid_ds semid_ds;
4557     int i, ret;
4558 
4559     semun.buf = &semid_ds;
4560 
4561     ret = semctl(semid, 0, IPC_STAT, semun);
4562     if (ret == -1)
4563         return get_errno(ret);
4564 
4565     nsems = semid_ds.sem_nsems;
4566 
4567     *host_array = g_try_new(unsigned short, nsems);
4568     if (!*host_array) {
4569         return -TARGET_ENOMEM;
4570     }
4571     array = lock_user(VERIFY_READ, target_addr,
4572                       nsems*sizeof(unsigned short), 1);
4573     if (!array) {
4574         g_free(*host_array);
4575         return -TARGET_EFAULT;
4576     }
4577 
4578     for(i=0; i<nsems; i++) {
4579         __get_user((*host_array)[i], &array[i]);
4580     }
4581     unlock_user(array, target_addr, 0);
4582 
4583     return 0;
4584 }
4585 
4586 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4587                                                unsigned short **host_array)
4588 {
4589     int nsems;
4590     unsigned short *array;
4591     union semun semun;
4592     struct semid_ds semid_ds;
4593     int i, ret;
4594 
4595     semun.buf = &semid_ds;
4596 
4597     ret = semctl(semid, 0, IPC_STAT, semun);
4598     if (ret == -1)
4599         return get_errno(ret);
4600 
4601     nsems = semid_ds.sem_nsems;
4602 
4603     array = lock_user(VERIFY_WRITE, target_addr,
4604                       nsems*sizeof(unsigned short), 0);
4605     if (!array)
4606         return -TARGET_EFAULT;
4607 
4608     for(i=0; i<nsems; i++) {
4609         __put_user((*host_array)[i], &array[i]);
4610     }
4611     g_free(*host_array);
4612     unlock_user(array, target_addr, 1);
4613 
4614     return 0;
4615 }
4616 
4617 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4618                                  abi_ulong target_arg)
4619 {
4620     union target_semun target_su = { .buf = target_arg };
4621     union semun arg;
4622     struct semid_ds dsarg;
4623     unsigned short *array = NULL;
4624     struct seminfo seminfo;
4625     abi_long ret = -TARGET_EINVAL;
4626     abi_long err;
4627     cmd &= 0xff;
4628 
4629     switch (cmd) {
4630 	case GETVAL:
4631 	case SETVAL:
4632             /* In 64 bit cross-endian situations, we will erroneously pick up
4633              * the wrong half of the union for the "val" element.  To rectify
4634              * this, the entire 8-byte structure is byteswapped, followed by
4635 	     * a swap of the 4 byte val field. In other cases, the data is
4636 	     * already in proper host byte order. */
4637 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4638 		target_su.buf = tswapal(target_su.buf);
4639 		arg.val = tswap32(target_su.val);
4640 	    } else {
4641 		arg.val = target_su.val;
4642 	    }
4643             ret = get_errno(semctl(semid, semnum, cmd, arg));
4644             break;
4645 	case GETALL:
4646 	case SETALL:
4647             err = target_to_host_semarray(semid, &array, target_su.array);
4648             if (err)
4649                 return err;
4650             arg.array = array;
4651             ret = get_errno(semctl(semid, semnum, cmd, arg));
4652             err = host_to_target_semarray(semid, target_su.array, &array);
4653             if (err)
4654                 return err;
4655             break;
4656 	case IPC_STAT:
4657 	case IPC_SET:
4658 	case SEM_STAT:
4659             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4660             if (err)
4661                 return err;
4662             arg.buf = &dsarg;
4663             ret = get_errno(semctl(semid, semnum, cmd, arg));
4664             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4665             if (err)
4666                 return err;
4667             break;
4668 	case IPC_INFO:
4669 	case SEM_INFO:
4670             arg.__buf = &seminfo;
4671             ret = get_errno(semctl(semid, semnum, cmd, arg));
4672             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4673             if (err)
4674                 return err;
4675             break;
4676         case IPC_RMID:
4677         case GETPID:
4678         case GETNCNT:
4679         case GETZCNT:
4680             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4681             break;
4682     }
4683 
4684     return ret;
4685 }
4686 
4687 struct target_sembuf {
4688     unsigned short sem_num;
4689     short sem_op;
4690     short sem_flg;
4691 };
4692 
4693 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4694                                              abi_ulong target_addr,
4695                                              unsigned nsops)
4696 {
4697     struct target_sembuf *target_sembuf;
4698     int i;
4699 
4700     target_sembuf = lock_user(VERIFY_READ, target_addr,
4701                               nsops*sizeof(struct target_sembuf), 1);
4702     if (!target_sembuf)
4703         return -TARGET_EFAULT;
4704 
4705     for(i=0; i<nsops; i++) {
4706         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4707         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4708         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4709     }
4710 
4711     unlock_user(target_sembuf, target_addr, 0);
4712 
4713     return 0;
4714 }
4715 
4716 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4717 {
4718     struct sembuf sops[nsops];
4719 
4720     if (target_to_host_sembuf(sops, ptr, nsops))
4721         return -TARGET_EFAULT;
4722 
4723     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4724 }
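/* Note: semop() is implemented via safe_semtimedop() with a NULL
 * timeout, which is semantically identical but goes through the
 * safe-syscall path, so a pending guest signal can interrupt the
 * blocking operation and be delivered correctly. */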
4725 
4726 struct target_msqid_ds
4727 {
4728     struct target_ipc_perm msg_perm;
4729     abi_ulong msg_stime;
4730 #if TARGET_ABI_BITS == 32
4731     abi_ulong __unused1;
4732 #endif
4733     abi_ulong msg_rtime;
4734 #if TARGET_ABI_BITS == 32
4735     abi_ulong __unused2;
4736 #endif
4737     abi_ulong msg_ctime;
4738 #if TARGET_ABI_BITS == 32
4739     abi_ulong __unused3;
4740 #endif
4741     abi_ulong __msg_cbytes;
4742     abi_ulong msg_qnum;
4743     abi_ulong msg_qbytes;
4744     abi_ulong msg_lspid;
4745     abi_ulong msg_lrpid;
4746     abi_ulong __unused4;
4747     abi_ulong __unused5;
4748 };
4749 
4750 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4751                                                abi_ulong target_addr)
4752 {
4753     struct target_msqid_ds *target_md;
4754 
4755     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4756         return -TARGET_EFAULT;
4757     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4758         return -TARGET_EFAULT;
4759     host_md->msg_stime = tswapal(target_md->msg_stime);
4760     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4761     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4762     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4763     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4764     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4765     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4766     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4767     unlock_user_struct(target_md, target_addr, 0);
4768     return 0;
4769 }
4770 
4771 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4772                                                struct msqid_ds *host_md)
4773 {
4774     struct target_msqid_ds *target_md;
4775 
4776     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4777         return -TARGET_EFAULT;
4778     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4779         return -TARGET_EFAULT;
4780     target_md->msg_stime = tswapal(host_md->msg_stime);
4781     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4782     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4783     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4784     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4785     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4786     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4787     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4788     unlock_user_struct(target_md, target_addr, 1);
4789     return 0;
4790 }
4791 
4792 struct target_msginfo {
4793     int msgpool;
4794     int msgmap;
4795     int msgmax;
4796     int msgmnb;
4797     int msgmni;
4798     int msgssz;
4799     int msgtql;
4800     unsigned short int msgseg;
4801 };
4802 
4803 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4804                                               struct msginfo *host_msginfo)
4805 {
4806     struct target_msginfo *target_msginfo;
4807     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4808         return -TARGET_EFAULT;
4809     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4810     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4811     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4812     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4813     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4814     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4815     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4816     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4817     unlock_user_struct(target_msginfo, target_addr, 1);
4818     return 0;
4819 }
4820 
4821 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4822 {
4823     struct msqid_ds dsarg;
4824     struct msginfo msginfo;
4825     abi_long ret = -TARGET_EINVAL;
4826 
4827     cmd &= 0xff;
4828 
4829     switch (cmd) {
4830     case IPC_STAT:
4831     case IPC_SET:
4832     case MSG_STAT:
4833         if (target_to_host_msqid_ds(&dsarg,ptr))
4834             return -TARGET_EFAULT;
4835         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4836         if (host_to_target_msqid_ds(ptr,&dsarg))
4837             return -TARGET_EFAULT;
4838         break;
4839     case IPC_RMID:
4840         ret = get_errno(msgctl(msgid, cmd, NULL));
4841         break;
4842     case IPC_INFO:
4843     case MSG_INFO:
4844         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4845         if (host_to_target_msginfo(ptr, &msginfo))
4846             return -TARGET_EFAULT;
4847         break;
4848     }
4849 
4850     return ret;
4851 }
4852 
4853 struct target_msgbuf {
4854     abi_long mtype;
4855     char	mtext[1];
4856 };
4857 
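/* Layout note: the host "struct msgbuf" is a long mtype followed by
 * the message text, which is why host_mb below is sized
 * msgsz + sizeof(long).  The guest layout uses abi_long for mtype,
 * so mtype is converted with tswapal() while mtext is copied as raw
 * bytes. */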
4858 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4859                                  ssize_t msgsz, int msgflg)
4860 {
4861     struct target_msgbuf *target_mb;
4862     struct msgbuf *host_mb;
4863     abi_long ret = 0;
4864 
4865     if (msgsz < 0) {
4866         return -TARGET_EINVAL;
4867     }
4868 
4869     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4870         return -TARGET_EFAULT;
4871     host_mb = g_try_malloc(msgsz + sizeof(long));
4872     if (!host_mb) {
4873         unlock_user_struct(target_mb, msgp, 0);
4874         return -TARGET_ENOMEM;
4875     }
4876     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4877     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4878     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4879     g_free(host_mb);
4880     unlock_user_struct(target_mb, msgp, 0);
4881 
4882     return ret;
4883 }
4884 
4885 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4886                                  ssize_t msgsz, abi_long msgtyp,
4887                                  int msgflg)
4888 {
4889     struct target_msgbuf *target_mb;
4890     char *target_mtext;
4891     struct msgbuf *host_mb;
4892     abi_long ret = 0;
4893 
4894     if (msgsz < 0) {
4895         return -TARGET_EINVAL;
4896     }
4897 
4898     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4899         return -TARGET_EFAULT;
4900 
4901     host_mb = g_try_malloc(msgsz + sizeof(long));
4902     if (!host_mb) {
4903         ret = -TARGET_ENOMEM;
4904         goto end;
4905     }
4906     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4907 
4908     if (ret > 0) {
4909         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4910         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4911         if (!target_mtext) {
4912             ret = -TARGET_EFAULT;
4913             goto end;
4914         }
4915         memcpy(target_mb->mtext, host_mb->mtext, ret);
4916         unlock_user(target_mtext, target_mtext_addr, ret);
4917     }
4918 
4919     target_mb->mtype = tswapal(host_mb->mtype);
4920 
4921 end:
4922     if (target_mb)
4923         unlock_user_struct(target_mb, msgp, 1);
4924     g_free(host_mb);
4925     return ret;
4926 }
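/* Note on do_msgrcv() above: a positive return value from safe_msgrcv()
 * is the number of message bytes received, so only that many bytes of
 * mtext are copied back to the guest.  The mtext area is mapped
 * separately from the header because its length is only known once the
 * receive has completed. */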
4927 
4928 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4929                                                abi_ulong target_addr)
4930 {
4931     struct target_shmid_ds *target_sd;
4932 
4933     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4934         return -TARGET_EFAULT;
4935     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4936         return -TARGET_EFAULT;
4937     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4938     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4939     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4940     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4941     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4942     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4943     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4944     unlock_user_struct(target_sd, target_addr, 0);
4945     return 0;
4946 }
4947 
4948 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4949                                                struct shmid_ds *host_sd)
4950 {
4951     struct target_shmid_ds *target_sd;
4952 
4953     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4954         return -TARGET_EFAULT;
4955     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4956         return -TARGET_EFAULT;
4957     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4958     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4959     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4960     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4961     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4962     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4963     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4964     unlock_user_struct(target_sd, target_addr, 1);
4965     return 0;
4966 }
4967 
4968 struct  target_shminfo {
4969     abi_ulong shmmax;
4970     abi_ulong shmmin;
4971     abi_ulong shmmni;
4972     abi_ulong shmseg;
4973     abi_ulong shmall;
4974 };
4975 
4976 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4977                                               struct shminfo *host_shminfo)
4978 {
4979     struct target_shminfo *target_shminfo;
4980     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4981         return -TARGET_EFAULT;
4982     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4983     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4984     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4985     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4986     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4987     unlock_user_struct(target_shminfo, target_addr, 1);
4988     return 0;
4989 }
4990 
4991 struct target_shm_info {
4992     int used_ids;
4993     abi_ulong shm_tot;
4994     abi_ulong shm_rss;
4995     abi_ulong shm_swp;
4996     abi_ulong swap_attempts;
4997     abi_ulong swap_successes;
4998 };
4999 
5000 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
5001                                                struct shm_info *host_shm_info)
5002 {
5003     struct target_shm_info *target_shm_info;
5004     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
5005         return -TARGET_EFAULT;
5006     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
5007     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
5008     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
5009     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
5010     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
5011     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
5012     unlock_user_struct(target_shm_info, target_addr, 1);
5013     return 0;
5014 }
5015 
5016 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
5017 {
5018     struct shmid_ds dsarg;
5019     struct shminfo shminfo;
5020     struct shm_info shm_info;
5021     abi_long ret = -TARGET_EINVAL;
5022 
5023     cmd &= 0xff;
5024 
5025     switch(cmd) {
5026     case IPC_STAT:
5027     case IPC_SET:
5028     case SHM_STAT:
5029         if (target_to_host_shmid_ds(&dsarg, buf))
5030             return -TARGET_EFAULT;
5031         ret = get_errno(shmctl(shmid, cmd, &dsarg));
5032         if (host_to_target_shmid_ds(buf, &dsarg))
5033             return -TARGET_EFAULT;
5034         break;
5035     case IPC_INFO:
5036         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
5037         if (host_to_target_shminfo(buf, &shminfo))
5038             return -TARGET_EFAULT;
5039         break;
5040     case SHM_INFO:
5041         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
5042         if (host_to_target_shm_info(buf, &shm_info))
5043             return -TARGET_EFAULT;
5044         break;
5045     case IPC_RMID:
5046     case SHM_LOCK:
5047     case SHM_UNLOCK:
5048         ret = get_errno(shmctl(shmid, cmd, NULL));
5049         break;
5050     }
5051 
5052     return ret;
5053 }
5054 
5055 #ifndef TARGET_FORCE_SHMLBA
5056 /* For most architectures, SHMLBA is the same as the page size;
5057  * some architectures have larger values, in which case they should
5058  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
5059  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
5060  * and defining its own value for SHMLBA.
5061  *
5062  * The kernel also permits SHMLBA to be set by the architecture to a
5063  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
5064  * this means that addresses are rounded to the large size if
5065  * SHM_RND is set but addresses not aligned to that size are not rejected
5066  * as long as they are at least page-aligned. Since the only architecture
5067  * which uses this is ia64 this code doesn't provide for that oddity.
5068  */
5069 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
5070 {
5071     return TARGET_PAGE_SIZE;
5072 }
5073 #endif
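/* Rounding example for do_shmat() below: with shmlba = 0x1000 and
 * shmaddr = 0x40001234, SHM_RND rounds the attach address down to
 * 0x40001000; without SHM_RND an unaligned address is rejected with
 * EINVAL, matching kernel behaviour. */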
5074 
5075 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
5076                                  int shmid, abi_ulong shmaddr, int shmflg)
5077 {
5078     abi_long raddr;
5079     void *host_raddr;
5080     struct shmid_ds shm_info;
5081     int i, ret;
5082     abi_ulong shmlba;
5083 
5084     /* find out the length of the shared memory segment */
5085     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
5086     if (is_error(ret)) {
5087         /* can't get length, bail out */
5088         return ret;
5089     }
5090 
5091     shmlba = target_shmlba(cpu_env);
5092 
5093     if (shmaddr & (shmlba - 1)) {
5094         if (shmflg & SHM_RND) {
5095             shmaddr &= ~(shmlba - 1);
5096         } else {
5097             return -TARGET_EINVAL;
5098         }
5099     }
5100     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
5101         return -TARGET_EINVAL;
5102     }
5103 
5104     mmap_lock();
5105 
5106     if (shmaddr)
5107         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
5108     else {
5109         abi_ulong mmap_start;
5110 
5111         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
5112 
5113         if (mmap_start == -1) {
5114             errno = ENOMEM;
5115             host_raddr = (void *)-1;
5116         } else
5117             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
5118     }
5119 
5120     if (host_raddr == (void *)-1) {
5121         mmap_unlock();
5122         return get_errno((long)host_raddr);
5123     }
5124     raddr = h2g((unsigned long)host_raddr);
5125 
5126     page_set_flags(raddr, raddr + shm_info.shm_segsz,
5127                    PAGE_VALID | PAGE_READ |
5128                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
5129 
5130     for (i = 0; i < N_SHM_REGIONS; i++) {
5131         if (!shm_regions[i].in_use) {
5132             shm_regions[i].in_use = true;
5133             shm_regions[i].start = raddr;
5134             shm_regions[i].size = shm_info.shm_segsz;
5135             break;
5136         }
5137     }
5138 
5139     mmap_unlock();
5140     return raddr;
5141 
5142 }
5143 
5144 static inline abi_long do_shmdt(abi_ulong shmaddr)
5145 {
5146     int i;
5147     abi_long rv;
5148 
5149     mmap_lock();
5150 
5151     for (i = 0; i < N_SHM_REGIONS; ++i) {
5152         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
5153             shm_regions[i].in_use = false;
5154             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
5155             break;
5156         }
5157     }
5158     rv = get_errno(shmdt(g2h(shmaddr)));
5159 
5160     mmap_unlock();
5161 
5162     return rv;
5163 }
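/* Note: shm_regions[] records the guest address and size of each
 * attached segment because shmdt() only takes an address; do_shmdt()
 * needs the stored size to clear the page flags for the whole
 * mapping. */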
5164 
5165 #ifdef TARGET_NR_ipc
5166 /* ??? This only works with linear mappings.  */
5167 /* do_ipc() must return target values and target errnos. */
5168 static abi_long do_ipc(CPUArchState *cpu_env,
5169                        unsigned int call, abi_long first,
5170                        abi_long second, abi_long third,
5171                        abi_long ptr, abi_long fifth)
5172 {
5173     int version;
5174     abi_long ret = 0;
5175 
5176     version = call >> 16;
5177     call &= 0xffff;
5178 
5179     switch (call) {
5180     case IPCOP_semop:
5181         ret = do_semop(first, ptr, second);
5182         break;
5183 
5184     case IPCOP_semget:
5185         ret = get_errno(semget(first, second, third));
5186         break;
5187 
5188     case IPCOP_semctl: {
5189         /* The semun argument to semctl is passed by value, so dereference the
5190          * ptr argument. */
5191         abi_ulong atptr;
5192         get_user_ual(atptr, ptr);
5193         ret = do_semctl(first, second, third, atptr);
5194         break;
5195     }
5196 
5197     case IPCOP_msgget:
5198         ret = get_errno(msgget(first, second));
5199         break;
5200 
5201     case IPCOP_msgsnd:
5202         ret = do_msgsnd(first, ptr, second, third);
5203         break;
5204 
5205     case IPCOP_msgctl:
5206         ret = do_msgctl(first, second, ptr);
5207         break;
5208 
5209     case IPCOP_msgrcv:
5210         switch (version) {
5211         case 0:
5212             {
5213                 struct target_ipc_kludge {
5214                     abi_long msgp;
5215                     abi_long msgtyp;
5216                 } *tmp;
5217 
5218                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5219                     ret = -TARGET_EFAULT;
5220                     break;
5221                 }
5222 
5223                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5224 
5225                 unlock_user_struct(tmp, ptr, 0);
5226                 break;
5227             }
5228         default:
5229             ret = do_msgrcv(first, ptr, second, fifth, third);
5230         }
5231         break;
5232 
5233     case IPCOP_shmat:
5234         switch (version) {
5235         default:
5236         {
5237             abi_ulong raddr;
5238             raddr = do_shmat(cpu_env, first, ptr, second);
5239             if (is_error(raddr))
5240                 return get_errno(raddr);
5241             if (put_user_ual(raddr, third))
5242                 return -TARGET_EFAULT;
5243             break;
5244         }
5245         case 1:
5246             ret = -TARGET_EINVAL;
5247             break;
5248         }
5249         break;
5250     case IPCOP_shmdt:
5251         ret = do_shmdt(ptr);
5252         break;
5253 
5254     case IPCOP_shmget:
5255         /* IPC_* flag values are the same on all linux platforms */
5256         ret = get_errno(shmget(first, second, third));
5257         break;
5258 
5259     /* IPC_* and SHM_* command values are the same on all linux platforms */
5260     case IPCOP_shmctl:
5261         ret = do_shmctl(first, second, ptr);
5262         break;
5263     default:
5264         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5265         ret = -TARGET_ENOSYS;
5266         break;
5267     }
5268     return ret;
5269 }
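/* Note on the ipc(2) multiplexer above: the low 16 bits of "call"
 * select the operation and the high 16 bits carry a version number.
 * Version 0 of IPCOP_msgrcv passes msgp and msgtyp indirectly through
 * a small kludge structure, which is why that case is decoded
 * separately. */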
5270 #endif
5271 
5272 /* kernel structure types definitions */
5273 
5274 #define STRUCT(name, ...) STRUCT_ ## name,
5275 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5276 enum {
5277 #include "syscall_types.h"
5278 STRUCT_MAX
5279 };
5280 #undef STRUCT
5281 #undef STRUCT_SPECIAL
5282 
5283 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
5284 #define STRUCT_SPECIAL(name)
5285 #include "syscall_types.h"
5286 #undef STRUCT
5287 #undef STRUCT_SPECIAL
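/* syscall_types.h is included twice with different STRUCT macro
 * definitions: the first pass builds the STRUCT_xxx enumerators used
 * as type tags, and the second pass emits a struct_xxx_def argtype
 * array for each structure, which the thunk code uses to convert
 * ioctl arguments between target and host layouts.  STRUCT_SPECIAL
 * types only get a tag and rely on hand-written converters. */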
5288 
5289 typedef struct IOCTLEntry IOCTLEntry;
5290 
5291 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5292                              int fd, int cmd, abi_long arg);
5293 
5294 struct IOCTLEntry {
5295     int target_cmd;
5296     unsigned int host_cmd;
5297     const char *name;
5298     int access;
5299     do_ioctl_fn *do_ioctl;
5300     const argtype arg_type[5];
5301 };
5302 
5303 #define IOC_R 0x0001
5304 #define IOC_W 0x0002
5305 #define IOC_RW (IOC_R | IOC_W)
5306 
5307 #define MAX_STRUCT_SIZE 4096
5308 
5309 #ifdef CONFIG_FIEMAP
5310 /* So fiemap access checks don't overflow on 32-bit systems.
5311  * This is very slightly smaller than the limit imposed by
5312  * the underlying kernel.
5313  */
5314 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
5315                             / sizeof(struct fiemap_extent))
5316 
5317 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5318                                        int fd, int cmd, abi_long arg)
5319 {
5320     /* The parameter for this ioctl is a struct fiemap followed
5321      * by an array of struct fiemap_extent whose size is set
5322      * in fiemap->fm_extent_count. The array is filled in by the
5323      * ioctl.
5324      */
5325     int target_size_in, target_size_out;
5326     struct fiemap *fm;
5327     const argtype *arg_type = ie->arg_type;
5328     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5329     void *argptr, *p;
5330     abi_long ret;
5331     int i, extent_size = thunk_type_size(extent_arg_type, 0);
5332     uint32_t outbufsz;
5333     int free_fm = 0;
5334 
5335     assert(arg_type[0] == TYPE_PTR);
5336     assert(ie->access == IOC_RW);
5337     arg_type++;
5338     target_size_in = thunk_type_size(arg_type, 0);
5339     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5340     if (!argptr) {
5341         return -TARGET_EFAULT;
5342     }
5343     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5344     unlock_user(argptr, arg, 0);
5345     fm = (struct fiemap *)buf_temp;
5346     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5347         return -TARGET_EINVAL;
5348     }
5349 
5350     outbufsz = sizeof (*fm) +
5351         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5352 
5353     if (outbufsz > MAX_STRUCT_SIZE) {
5354         /* We can't fit all the extents into the fixed size buffer.
5355          * Allocate one that is large enough and use it instead.
5356          */
5357         fm = g_try_malloc(outbufsz);
5358         if (!fm) {
5359             return -TARGET_ENOMEM;
5360         }
5361         memcpy(fm, buf_temp, sizeof(struct fiemap));
5362         free_fm = 1;
5363     }
5364     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5365     if (!is_error(ret)) {
5366         target_size_out = target_size_in;
5367         /* An extent_count of 0 means we were only counting the extents
5368          * so there are no structs to copy
5369          */
5370         if (fm->fm_extent_count != 0) {
5371             target_size_out += fm->fm_mapped_extents * extent_size;
5372         }
5373         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5374         if (!argptr) {
5375             ret = -TARGET_EFAULT;
5376         } else {
5377             /* Convert the struct fiemap */
5378             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5379             if (fm->fm_extent_count != 0) {
5380                 p = argptr + target_size_in;
5381                 /* ...and then all the struct fiemap_extents */
5382                 for (i = 0; i < fm->fm_mapped_extents; i++) {
5383                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5384                                   THUNK_TARGET);
5385                     p += extent_size;
5386                 }
5387             }
5388             unlock_user(argptr, arg, target_size_out);
5389         }
5390     }
5391     if (free_fm) {
5392         g_free(fm);
5393     }
5394     return ret;
5395 }
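/* Note on FS_IOC_FIEMAP handling above: the request size is variable
 * (a fiemap header plus fm_extent_count extents), so when the reply
 * would not fit in buf_temp a temporary buffer of outbufsz bytes is
 * allocated, and on success the header and each returned extent are
 * converted back to the target layout individually. */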
5396 #endif
5397 
5398 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5399                                 int fd, int cmd, abi_long arg)
5400 {
5401     const argtype *arg_type = ie->arg_type;
5402     int target_size;
5403     void *argptr;
5404     int ret;
5405     struct ifconf *host_ifconf;
5406     uint32_t outbufsz;
5407     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5408     int target_ifreq_size;
5409     int nb_ifreq;
5410     int free_buf = 0;
5411     int i;
5412     int target_ifc_len;
5413     abi_long target_ifc_buf;
5414     int host_ifc_len;
5415     char *host_ifc_buf;
5416 
5417     assert(arg_type[0] == TYPE_PTR);
5418     assert(ie->access == IOC_RW);
5419 
5420     arg_type++;
5421     target_size = thunk_type_size(arg_type, 0);
5422 
5423     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5424     if (!argptr)
5425         return -TARGET_EFAULT;
5426     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5427     unlock_user(argptr, arg, 0);
5428 
5429     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5430     target_ifc_len = host_ifconf->ifc_len;
5431     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5432 
5433     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5434     nb_ifreq = target_ifc_len / target_ifreq_size;
5435     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5436 
5437     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5438     if (outbufsz > MAX_STRUCT_SIZE) {
5439         /* We can't fit all the requested ifreq entries into the
5440          * fixed-size buffer.  Allocate one that is large enough and
5441          * use it instead. */
5442         host_ifconf = malloc(outbufsz);
5443         if (!host_ifconf) {
5444             return -TARGET_ENOMEM;
5445         }
5446         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5447         free_buf = 1;
5448     }
5449     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5450 
5451     host_ifconf->ifc_len = host_ifc_len;
5452     host_ifconf->ifc_buf = host_ifc_buf;
5453 
5454     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5455     if (!is_error(ret)) {
5456 	/* convert host ifc_len to target ifc_len */
5457         /* convert host ifc_len to target ifc_len */
5458         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5459         target_ifc_len = nb_ifreq * target_ifreq_size;
5460         host_ifconf->ifc_len = target_ifc_len;
5461 
5462         /* restore target ifc_buf */
5463 
5464         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5465 
5466         /* copy struct ifconf to target user */
5467 
5468         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5469         if (!argptr)
5470             return -TARGET_EFAULT;
5471         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5472         unlock_user(argptr, arg, target_size);
5473 
5474         /* copy ifreq[] to target user */
5475 
5476         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5477         for (i = 0; i < nb_ifreq ; i++) {
5478             thunk_convert(argptr + i * target_ifreq_size,
5479                           host_ifc_buf + i * sizeof(struct ifreq),
5480                           ifreq_arg_type, THUNK_TARGET);
5481         }
5482         unlock_user(argptr, target_ifc_buf, target_ifc_len);
5483     }
5484 
5485     if (free_buf) {
5486         free(host_ifconf);
5487     }
5488 
5489     return ret;
5490 }
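/* Note on SIOCGIFCONF handling above: target and host "struct ifreq"
 * may differ in size, so ifc_len is scaled between the two using
 * target_ifreq_size and sizeof(struct ifreq), and each returned ifreq
 * entry is converted individually before being copied back to the
 * guest buffer. */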
5491 
5492 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5493                             int cmd, abi_long arg)
5494 {
5495     void *argptr;
5496     struct dm_ioctl *host_dm;
5497     abi_long guest_data;
5498     uint32_t guest_data_size;
5499     int target_size;
5500     const argtype *arg_type = ie->arg_type;
5501     abi_long ret;
5502     void *big_buf = NULL;
5503     char *host_data;
5504 
5505     arg_type++;
5506     target_size = thunk_type_size(arg_type, 0);
5507     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5508     if (!argptr) {
5509         ret = -TARGET_EFAULT;
5510         goto out;
5511     }
5512     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5513     unlock_user(argptr, arg, 0);
5514 
5515     /* buf_temp is too small, so fetch things into a bigger buffer */
5516     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5517     memcpy(big_buf, buf_temp, target_size);
5518     buf_temp = big_buf;
5519     host_dm = big_buf;
5520 
5521     guest_data = arg + host_dm->data_start;
5522     if ((guest_data - arg) < 0) {
5523         ret = -TARGET_EINVAL;
5524         goto out;
5525     }
5526     guest_data_size = host_dm->data_size - host_dm->data_start;
5527     host_data = (char*)host_dm + host_dm->data_start;
5528 
5529     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5530     if (!argptr) {
5531         ret = -TARGET_EFAULT;
5532         goto out;
5533     }
5534 
5535     switch (ie->host_cmd) {
5536     case DM_REMOVE_ALL:
5537     case DM_LIST_DEVICES:
5538     case DM_DEV_CREATE:
5539     case DM_DEV_REMOVE:
5540     case DM_DEV_SUSPEND:
5541     case DM_DEV_STATUS:
5542     case DM_DEV_WAIT:
5543     case DM_TABLE_STATUS:
5544     case DM_TABLE_CLEAR:
5545     case DM_TABLE_DEPS:
5546     case DM_LIST_VERSIONS:
5547         /* no input data */
5548         break;
5549     case DM_DEV_RENAME:
5550     case DM_DEV_SET_GEOMETRY:
5551         /* data contains only strings */
5552         memcpy(host_data, argptr, guest_data_size);
5553         break;
5554     case DM_TARGET_MSG:
5555         memcpy(host_data, argptr, guest_data_size);
5556         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5557         break;
5558     case DM_TABLE_LOAD:
5559     {
5560         void *gspec = argptr;
5561         void *cur_data = host_data;
5562         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5563         int spec_size = thunk_type_size(arg_type, 0);
5564         int i;
5565 
5566         for (i = 0; i < host_dm->target_count; i++) {
5567             struct dm_target_spec *spec = cur_data;
5568             uint32_t next;
5569             int slen;
5570 
5571             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5572             slen = strlen((char*)gspec + spec_size) + 1;
5573             next = spec->next;
5574             spec->next = sizeof(*spec) + slen;
5575             strcpy((char*)&spec[1], gspec + spec_size);
5576             gspec += next;
5577             cur_data += spec->next;
5578         }
5579         break;
5580     }
5581     default:
5582         ret = -TARGET_EINVAL;
5583         unlock_user(argptr, guest_data, 0);
5584         goto out;
5585     }
5586     unlock_user(argptr, guest_data, 0);
5587 
5588     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5589     if (!is_error(ret)) {
5590         guest_data = arg + host_dm->data_start;
5591         guest_data_size = host_dm->data_size - host_dm->data_start;
5592         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5593         switch (ie->host_cmd) {
5594         case DM_REMOVE_ALL:
5595         case DM_DEV_CREATE:
5596         case DM_DEV_REMOVE:
5597         case DM_DEV_RENAME:
5598         case DM_DEV_SUSPEND:
5599         case DM_DEV_STATUS:
5600         case DM_TABLE_LOAD:
5601         case DM_TABLE_CLEAR:
5602         case DM_TARGET_MSG:
5603         case DM_DEV_SET_GEOMETRY:
5604             /* no return data */
5605             break;
5606         case DM_LIST_DEVICES:
5607         {
5608             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5609             uint32_t remaining_data = guest_data_size;
5610             void *cur_data = argptr;
5611             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5612             int nl_size = 12; /* can't use thunk_size due to alignment */
5613 
5614             while (1) {
5615                 uint32_t next = nl->next;
5616                 if (next) {
5617                     nl->next = nl_size + (strlen(nl->name) + 1);
5618                 }
5619                 if (remaining_data < nl->next) {
5620                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5621                     break;
5622                 }
5623                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5624                 strcpy(cur_data + nl_size, nl->name);
5625                 cur_data += nl->next;
5626                 remaining_data -= nl->next;
5627                 if (!next) {
5628                     break;
5629                 }
5630                 nl = (void*)nl + next;
5631             }
5632             break;
5633         }
5634         case DM_DEV_WAIT:
5635         case DM_TABLE_STATUS:
5636         {
5637             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5638             void *cur_data = argptr;
5639             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5640             int spec_size = thunk_type_size(arg_type, 0);
5641             int i;
5642 
5643             for (i = 0; i < host_dm->target_count; i++) {
5644                 uint32_t next = spec->next;
5645                 int slen = strlen((char*)&spec[1]) + 1;
5646                 spec->next = (cur_data - argptr) + spec_size + slen;
5647                 if (guest_data_size < spec->next) {
5648                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5649                     break;
5650                 }
5651                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5652                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5653                 cur_data = argptr + spec->next;
5654                 spec = (void*)host_dm + host_dm->data_start + next;
5655             }
5656             break;
5657         }
5658         case DM_TABLE_DEPS:
5659         {
5660             void *hdata = (void*)host_dm + host_dm->data_start;
5661             int count = *(uint32_t*)hdata;
5662             uint64_t *hdev = hdata + 8;
5663             uint64_t *gdev = argptr + 8;
5664             int i;
5665 
5666             *(uint32_t*)argptr = tswap32(count);
5667             for (i = 0; i < count; i++) {
5668                 *gdev = tswap64(*hdev);
5669                 gdev++;
5670                 hdev++;
5671             }
5672             break;
5673         }
5674         case DM_LIST_VERSIONS:
5675         {
5676             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5677             uint32_t remaining_data = guest_data_size;
5678             void *cur_data = argptr;
5679             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5680             int vers_size = thunk_type_size(arg_type, 0);
5681 
5682             while (1) {
5683                 uint32_t next = vers->next;
5684                 if (next) {
5685                     vers->next = vers_size + (strlen(vers->name) + 1);
5686                 }
5687                 if (remaining_data < vers->next) {
5688                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5689                     break;
5690                 }
5691                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5692                 strcpy(cur_data + vers_size, vers->name);
5693                 cur_data += vers->next;
5694                 remaining_data -= vers->next;
5695                 if (!next) {
5696                     break;
5697                 }
5698                 vers = (void*)vers + next;
5699             }
5700             break;
5701         }
5702         default:
5703             unlock_user(argptr, guest_data, 0);
5704             ret = -TARGET_EINVAL;
5705             goto out;
5706         }
5707         unlock_user(argptr, guest_data, guest_data_size);
5708 
5709         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5710         if (!argptr) {
5711             ret = -TARGET_EFAULT;
5712             goto out;
5713         }
5714         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5715         unlock_user(argptr, arg, target_size);
5716     }
5717 out:
5718     g_free(big_buf);
5719     return ret;
5720 }
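/* Note on the device-mapper ioctls above: struct dm_ioctl carries a
 * variable-sized payload described by data_start/data_size, so the
 * request is staged in a larger temporary buffer (big_buf) and the
 * payload is converted per command in both directions, with
 * DM_BUFFER_FULL_FLAG set when the guest buffer cannot hold the
 * reply. */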
5721 
5722 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5723                                int cmd, abi_long arg)
5724 {
5725     void *argptr;
5726     int target_size;
5727     const argtype *arg_type = ie->arg_type;
5728     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5729     abi_long ret;
5730 
5731     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5732     struct blkpg_partition host_part;
5733 
5734     /* Read and convert blkpg */
5735     arg_type++;
5736     target_size = thunk_type_size(arg_type, 0);
5737     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5738     if (!argptr) {
5739         ret = -TARGET_EFAULT;
5740         goto out;
5741     }
5742     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5743     unlock_user(argptr, arg, 0);
5744 
5745     switch (host_blkpg->op) {
5746     case BLKPG_ADD_PARTITION:
5747     case BLKPG_DEL_PARTITION:
5748         /* payload is struct blkpg_partition */
5749         break;
5750     default:
5751         /* Unknown opcode */
5752         ret = -TARGET_EINVAL;
5753         goto out;
5754     }
5755 
5756     /* Read and convert blkpg->data */
5757     arg = (abi_long)(uintptr_t)host_blkpg->data;
5758     target_size = thunk_type_size(part_arg_type, 0);
5759     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5760     if (!argptr) {
5761         ret = -TARGET_EFAULT;
5762         goto out;
5763     }
5764     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5765     unlock_user(argptr, arg, 0);
5766 
5767     /* Swizzle the data pointer to our local copy and call! */
5768     host_blkpg->data = &host_part;
5769     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5770 
5771 out:
5772     return ret;
5773 }
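/* Note on BLKPG handling above: the ioctl argument contains a pointer
 * to a struct blkpg_partition payload, so the payload is fetched and
 * converted separately and host_blkpg->data is redirected to the local
 * copy before the host ioctl is issued. */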
5774 
5775 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5776                                 int fd, int cmd, abi_long arg)
5777 {
5778     const argtype *arg_type = ie->arg_type;
5779     const StructEntry *se;
5780     const argtype *field_types;
5781     const int *dst_offsets, *src_offsets;
5782     int target_size;
5783     void *argptr;
5784     abi_ulong *target_rt_dev_ptr;
5785     unsigned long *host_rt_dev_ptr;
5786     abi_long ret;
5787     int i;
5788 
5789     assert(ie->access == IOC_W);
5790     assert(*arg_type == TYPE_PTR);
5791     arg_type++;
5792     assert(*arg_type == TYPE_STRUCT);
5793     target_size = thunk_type_size(arg_type, 0);
5794     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5795     if (!argptr) {
5796         return -TARGET_EFAULT;
5797     }
5798     arg_type++;
5799     assert(*arg_type == (int)STRUCT_rtentry);
5800     se = struct_entries + *arg_type++;
5801     assert(se->convert[0] == NULL);
5802     /* convert struct here to be able to catch rt_dev string */
5803     field_types = se->field_types;
5804     dst_offsets = se->field_offsets[THUNK_HOST];
5805     src_offsets = se->field_offsets[THUNK_TARGET];
5806     for (i = 0; i < se->nb_fields; i++) {
5807         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5808             assert(*field_types == TYPE_PTRVOID);
5809             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5810             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5811             if (*target_rt_dev_ptr != 0) {
5812                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5813                                                   tswapal(*target_rt_dev_ptr));
5814                 if (!*host_rt_dev_ptr) {
5815                     unlock_user(argptr, arg, 0);
5816                     return -TARGET_EFAULT;
5817                 }
5818             } else {
5819                 *host_rt_dev_ptr = 0;
5820             }
5821             field_types++;
5822             continue;
5823         }
5824         field_types = thunk_convert(buf_temp + dst_offsets[i],
5825                                     argptr + src_offsets[i],
5826                                     field_types, THUNK_HOST);
5827     }
5828     unlock_user(argptr, arg, 0);
5829 
5830     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5831     if (*host_rt_dev_ptr != 0) {
5832         unlock_user((void *)*host_rt_dev_ptr,
5833                     *target_rt_dev_ptr, 0);
5834     }
5835     return ret;
5836 }
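/* Note on the route ioctls (SIOCADDRT and friends) handled above:
 * struct rtentry embeds a pointer to a device name string, which the
 * generic thunk code cannot follow, so the fields are converted one by
 * one and rt_dev is remapped to a locked copy of the guest string for
 * the duration of the ioctl. */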
5837 
5838 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5839                                      int fd, int cmd, abi_long arg)
5840 {
5841     int sig = target_to_host_signal(arg);
5842     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5843 }
5844 
5845 #ifdef TIOCGPTPEER
5846 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5847                                      int fd, int cmd, abi_long arg)
5848 {
5849     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5850     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5851 }
5852 #endif
5853 
5854 static IOCTLEntry ioctl_entries[] = {
5855 #define IOCTL(cmd, access, ...) \
5856     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5857 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5858     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5859 #define IOCTL_IGNORE(cmd) \
5860     { TARGET_ ## cmd, 0, #cmd },
5861 #include "ioctls.h"
5862     { 0, 0, },
5863 };
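/* The IOCTL* macros above expand each line of ioctls.h into an
 * IOCTLEntry: the target ioctl number, the matching host number, a
 * name for logging, the data direction (IOC_R/IOC_W/IOC_RW), an
 * optional custom handler (IOCTL_SPECIAL) and an argtype description
 * of the argument.  IOCTL_IGNORE entries leave host_cmd at 0 so that
 * do_ioctl() rejects them with ENOSYS. */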
5864 
5865 /* ??? Implement proper locking for ioctls.  */
5866 /* do_ioctl() must return target values and target errnos. */
5867 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5868 {
5869     const IOCTLEntry *ie;
5870     const argtype *arg_type;
5871     abi_long ret;
5872     uint8_t buf_temp[MAX_STRUCT_SIZE];
5873     int target_size;
5874     void *argptr;
5875 
5876     ie = ioctl_entries;
5877     for(;;) {
5878         if (ie->target_cmd == 0) {
5879             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5880             return -TARGET_ENOSYS;
5881         }
5882         if (ie->target_cmd == cmd)
5883             break;
5884         ie++;
5885     }
5886     arg_type = ie->arg_type;
5887 #if defined(DEBUG)
5888     gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5889 #endif
5890     if (ie->do_ioctl) {
5891         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5892     } else if (!ie->host_cmd) {
5893         /* Some architectures define BSD ioctls in their headers
5894            that are not implemented in Linux.  */
5895         return -TARGET_ENOSYS;
5896     }
5897 
5898     switch(arg_type[0]) {
5899     case TYPE_NULL:
5900         /* no argument */
5901         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5902         break;
5903     case TYPE_PTRVOID:
5904     case TYPE_INT:
5905         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5906         break;
5907     case TYPE_PTR:
5908         arg_type++;
5909         target_size = thunk_type_size(arg_type, 0);
5910         switch(ie->access) {
5911         case IOC_R:
5912             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5913             if (!is_error(ret)) {
5914                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5915                 if (!argptr)
5916                     return -TARGET_EFAULT;
5917                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5918                 unlock_user(argptr, arg, target_size);
5919             }
5920             break;
5921         case IOC_W:
5922             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5923             if (!argptr)
5924                 return -TARGET_EFAULT;
5925             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5926             unlock_user(argptr, arg, 0);
5927             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5928             break;
5929         default:
5930         case IOC_RW:
5931             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5932             if (!argptr)
5933                 return -TARGET_EFAULT;
5934             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5935             unlock_user(argptr, arg, 0);
5936             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5937             if (!is_error(ret)) {
5938                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5939                 if (!argptr)
5940                     return -TARGET_EFAULT;
5941                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5942                 unlock_user(argptr, arg, target_size);
5943             }
5944             break;
5945         }
5946         break;
5947     default:
5948         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5949                  (long)cmd, arg_type[0]);
5950         ret = -TARGET_ENOSYS;
5951         break;
5952     }
5953     return ret;
5954 }
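/* Generic ioctl argument handling above: TYPE_NULL means no argument,
 * TYPE_INT and TYPE_PTRVOID arguments are passed through unchanged,
 * and TYPE_PTR arguments are converted via the thunk descriptions --
 * copied in before the call for IOC_W, copied back out afterwards for
 * IOC_R, and both for IOC_RW. */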
5955 
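/* Each bitmask_transtbl entry below is a (target mask, target bits,
 * host mask, host bits) tuple: target_to_host_bitmask() sets the host
 * bits whenever the masked target value equals the target bits, and
 * host_to_target_bitmask() does the reverse.  This covers both simple
 * one-bit flags and multi-bit fields such as CBAUD and CSIZE. */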
5956 static const bitmask_transtbl iflag_tbl[] = {
5957         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5958         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5959         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5960         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5961         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5962         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5963         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5964         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5965         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5966         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5967         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5968         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5969         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5970         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5971         { 0, 0, 0, 0 }
5972 };
5973 
5974 static const bitmask_transtbl oflag_tbl[] = {
5975 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5976 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5977 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5978 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5979 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5980 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5981 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5982 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5983 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5984 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5985 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5986 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5987 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5988 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5989 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5990 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5991 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5992 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5993 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5994 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5995 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5996 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5997 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5998 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5999 	{ 0, 0, 0, 0 }
6000 };
6001 
6002 static const bitmask_transtbl cflag_tbl[] = {
6003 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
6004 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
6005 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
6006 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
6007 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
6008 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
6009 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
6010 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
6011 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
6012 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
6013 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
6014 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
6015 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
6016 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
6017 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
6018 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
6019 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
6020 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
6021 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
6022 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
6023 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
6024 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
6025 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
6026 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
6027 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
6028 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
6029 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
6030 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
6031 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
6032 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
6033 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
6034 	{ 0, 0, 0, 0 }
6035 };
6036 
6037 static const bitmask_transtbl lflag_tbl[] = {
6038 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
6039 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
6040 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
6041 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
6042 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
6043 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
6044 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
6045 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
6046 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
6047 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
6048 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
6049 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
6050 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
6051 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
6052 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
6053 	{ 0, 0, 0, 0 }
6054 };
6055 
6056 static void target_to_host_termios (void *dst, const void *src)
6057 {
6058     struct host_termios *host = dst;
6059     const struct target_termios *target = src;
6060 
6061     host->c_iflag =
6062         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
6063     host->c_oflag =
6064         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
6065     host->c_cflag =
6066         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
6067     host->c_lflag =
6068         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
6069     host->c_line = target->c_line;
6070 
6071     memset(host->c_cc, 0, sizeof(host->c_cc));
6072     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
6073     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
6074     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
6075     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
6076     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
6077     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
6078     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
6079     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
6080     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
6081     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
6082     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
6083     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
6084     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
6085     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
6086     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
6087     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
6088     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6089 }
6090 
6091 static void host_to_target_termios (void *dst, const void *src)
6092 {
6093     struct target_termios *target = dst;
6094     const struct host_termios *host = src;
6095 
6096     target->c_iflag =
6097         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6098     target->c_oflag =
6099         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6100     target->c_cflag =
6101         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6102     target->c_lflag =
6103         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6104     target->c_line = host->c_line;
6105 
6106     memset(target->c_cc, 0, sizeof(target->c_cc));
6107     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6108     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6109     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6110     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6111     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6112     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6113     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6114     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6115     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6116     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6117     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6118     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6119     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6120     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6121     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6122     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6123     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6124 }
6125 
6126 static const StructEntry struct_termios_def = {
6127     .convert = { host_to_target_termios, target_to_host_termios },
6128     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6129     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6130 };
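/* termios is one of the STRUCT_SPECIAL types, so instead of the
 * generic field-by-field thunk conversion the two hand-written
 * converters above are used; this is needed because the c_cc array
 * indices and flag encodings can differ between target and host, not
 * just the field layout. */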
6131 
6132 static bitmask_transtbl mmap_flags_tbl[] = {
6133     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6134     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6135     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6136     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6137       MAP_ANONYMOUS, MAP_ANONYMOUS },
6138     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6139       MAP_GROWSDOWN, MAP_GROWSDOWN },
6140     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6141       MAP_DENYWRITE, MAP_DENYWRITE },
6142     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6143       MAP_EXECUTABLE, MAP_EXECUTABLE },
6144     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6145     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6146       MAP_NORESERVE, MAP_NORESERVE },
6147     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6148     /* MAP_STACK has been ignored by the kernel for quite some time.
6149        Recognize it for the target so that it is not passed through
6150        to the host.  */
6151     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6152     { 0, 0, 0, 0 }
6153 };
6154 
6155 #if defined(TARGET_I386)
6156 
6157 /* NOTE: there is really only one LDT shared by all the threads */
6158 static uint8_t *ldt_table;
6159 
6160 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6161 {
6162     int size;
6163     void *p;
6164 
6165     if (!ldt_table)
6166         return 0;
6167     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6168     if (size > bytecount)
6169         size = bytecount;
6170     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6171     if (!p)
6172         return -TARGET_EFAULT;
6173     /* ??? Should this be byteswapped?  */
6174     memcpy(p, ldt_table, size);
6175     unlock_user(p, ptr, size);
6176     return size;
6177 }
6178 
6179 /* XXX: add locking support */
6180 static abi_long write_ldt(CPUX86State *env,
6181                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6182 {
6183     struct target_modify_ldt_ldt_s ldt_info;
6184     struct target_modify_ldt_ldt_s *target_ldt_info;
6185     int seg_32bit, contents, read_exec_only, limit_in_pages;
6186     int seg_not_present, useable, lm;
6187     uint32_t *lp, entry_1, entry_2;
6188 
6189     if (bytecount != sizeof(ldt_info))
6190         return -TARGET_EINVAL;
6191     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6192         return -TARGET_EFAULT;
6193     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6194     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6195     ldt_info.limit = tswap32(target_ldt_info->limit);
6196     ldt_info.flags = tswap32(target_ldt_info->flags);
6197     unlock_user_struct(target_ldt_info, ptr, 0);
6198 
6199     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6200         return -TARGET_EINVAL;
6201     seg_32bit = ldt_info.flags & 1;
6202     contents = (ldt_info.flags >> 1) & 3;
6203     read_exec_only = (ldt_info.flags >> 3) & 1;
6204     limit_in_pages = (ldt_info.flags >> 4) & 1;
6205     seg_not_present = (ldt_info.flags >> 5) & 1;
6206     useable = (ldt_info.flags >> 6) & 1;
6207 #ifdef TARGET_ABI32
6208     lm = 0;
6209 #else
6210     lm = (ldt_info.flags >> 7) & 1;
6211 #endif
6212     if (contents == 3) {
6213         if (oldmode)
6214             return -TARGET_EINVAL;
6215         if (seg_not_present == 0)
6216             return -TARGET_EINVAL;
6217     }
6218     /* allocate the LDT */
6219     if (!ldt_table) {
6220         env->ldt.base = target_mmap(0,
6221                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6222                                     PROT_READ|PROT_WRITE,
6223                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6224         if (env->ldt.base == -1)
6225             return -TARGET_ENOMEM;
6226         memset(g2h(env->ldt.base), 0,
6227                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6228         env->ldt.limit = 0xffff;
6229         ldt_table = g2h(env->ldt.base);
6230     }
6231 
6232     /* NOTE: same code as Linux kernel */
6233     /* Allow LDTs to be cleared by the user. */
6234     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6235         if (oldmode ||
6236             (contents == 0		&&
6237              read_exec_only == 1	&&
6238              seg_32bit == 0		&&
6239              limit_in_pages == 0	&&
6240              seg_not_present == 1	&&
6241              useable == 0 )) {
6242             entry_1 = 0;
6243             entry_2 = 0;
6244             goto install;
6245         }
6246     }
6247 
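    /* Pack the fields into the two 32-bit halves of an x86 segment
     * descriptor: entry_1 carries base[15:0] and limit[15:0]; entry_2
     * carries base[31:24], the flag bits, limit[19:16] and base[23:16].
     * The constant 0x7000 sets the S (code/data) bit and DPL=3 (user). */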
6248     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6249         (ldt_info.limit & 0x0ffff);
6250     entry_2 = (ldt_info.base_addr & 0xff000000) |
6251         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6252         (ldt_info.limit & 0xf0000) |
6253         ((read_exec_only ^ 1) << 9) |
6254         (contents << 10) |
6255         ((seg_not_present ^ 1) << 15) |
6256         (seg_32bit << 22) |
6257         (limit_in_pages << 23) |
6258         (lm << 21) |
6259         0x7000;
6260     if (!oldmode)
6261         entry_2 |= (useable << 20);
6262 
6263     /* Install the new entry ...  */
6264 install:
6265     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6266     lp[0] = tswap32(entry_1);
6267     lp[1] = tswap32(entry_2);
6268     return 0;
6269 }
6270 
6271 /* specific and weird i386 syscalls */
6272 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6273                               unsigned long bytecount)
6274 {
6275     abi_long ret;
6276 
6277     switch (func) {
6278     case 0:
6279         ret = read_ldt(ptr, bytecount);
6280         break;
6281     case 1:
6282         ret = write_ldt(env, ptr, bytecount, 1);
6283         break;
6284     case 0x11:
6285         ret = write_ldt(env, ptr, bytecount, 0);
6286         break;
6287     default:
6288         ret = -TARGET_ENOSYS;
6289         break;
6290     }
6291     return ret;
6292 }
6293 
6294 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6295 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6296 {
6297     uint64_t *gdt_table = g2h(env->gdt.base);
6298     struct target_modify_ldt_ldt_s ldt_info;
6299     struct target_modify_ldt_ldt_s *target_ldt_info;
6300     int seg_32bit, contents, read_exec_only, limit_in_pages;
6301     int seg_not_present, useable, lm;
6302     uint32_t *lp, entry_1, entry_2;
6303     int i;
6304 
6305     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6306     if (!target_ldt_info)
6307         return -TARGET_EFAULT;
6308     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6309     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6310     ldt_info.limit = tswap32(target_ldt_info->limit);
6311     ldt_info.flags = tswap32(target_ldt_info->flags);
6312     if (ldt_info.entry_number == -1) {
6313         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6314             if (gdt_table[i] == 0) {
6315                 ldt_info.entry_number = i;
6316                 target_ldt_info->entry_number = tswap32(i);
6317                 break;
6318             }
6319         }
6320     }
6321     unlock_user_struct(target_ldt_info, ptr, 1);
6322 
6323     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6324         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6325            return -TARGET_EINVAL;
6326     seg_32bit = ldt_info.flags & 1;
6327     contents = (ldt_info.flags >> 1) & 3;
6328     read_exec_only = (ldt_info.flags >> 3) & 1;
6329     limit_in_pages = (ldt_info.flags >> 4) & 1;
6330     seg_not_present = (ldt_info.flags >> 5) & 1;
6331     useable = (ldt_info.flags >> 6) & 1;
6332 #ifdef TARGET_ABI32
6333     lm = 0;
6334 #else
6335     lm = (ldt_info.flags >> 7) & 1;
6336 #endif
6337 
6338     if (contents == 3) {
6339         if (seg_not_present == 0)
6340             return -TARGET_EINVAL;
6341     }
6342 
6343     /* NOTE: same code as Linux kernel */
6344     /* Allow LDTs to be cleared by the user. */
6345     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6346         if ((contents == 0             &&
6347              read_exec_only == 1       &&
6348              seg_32bit == 0            &&
6349              limit_in_pages == 0       &&
6350              seg_not_present == 1      &&
6351              useable == 0 )) {
6352             entry_1 = 0;
6353             entry_2 = 0;
6354             goto install;
6355         }
6356     }
6357 
6358     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6359         (ldt_info.limit & 0x0ffff);
6360     entry_2 = (ldt_info.base_addr & 0xff000000) |
6361         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6362         (ldt_info.limit & 0xf0000) |
6363         ((read_exec_only ^ 1) << 9) |
6364         (contents << 10) |
6365         ((seg_not_present ^ 1) << 15) |
6366         (seg_32bit << 22) |
6367         (limit_in_pages << 23) |
6368         (useable << 20) |
6369         (lm << 21) |
6370         0x7000;
6371 
6372     /* Install the new entry ...  */
6373 install:
6374     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6375     lp[0] = tswap32(entry_1);
6376     lp[1] = tswap32(entry_2);
6377     return 0;
6378 }
6379 
6380 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6381 {
6382     struct target_modify_ldt_ldt_s *target_ldt_info;
6383     uint64_t *gdt_table = g2h(env->gdt.base);
6384     uint32_t base_addr, limit, flags;
6385     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6386     int seg_not_present, useable, lm;
6387     uint32_t *lp, entry_1, entry_2;
6388 
6389     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6390     if (!target_ldt_info)
6391         return -TARGET_EFAULT;
6392     idx = tswap32(target_ldt_info->entry_number);
6393     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6394         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6395         unlock_user_struct(target_ldt_info, ptr, 1);
6396         return -TARGET_EINVAL;
6397     }
6398     lp = (uint32_t *)(gdt_table + idx);
6399     entry_1 = tswap32(lp[0]);
6400     entry_2 = tswap32(lp[1]);
6401 
6402     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6403     contents = (entry_2 >> 10) & 3;
6404     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6405     seg_32bit = (entry_2 >> 22) & 1;
6406     limit_in_pages = (entry_2 >> 23) & 1;
6407     useable = (entry_2 >> 20) & 1;
6408 #ifdef TARGET_ABI32
6409     lm = 0;
6410 #else
6411     lm = (entry_2 >> 21) & 1;
6412 #endif
6413     flags = (seg_32bit << 0) | (contents << 1) |
6414         (read_exec_only << 3) | (limit_in_pages << 4) |
6415         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6416     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6417     base_addr = (entry_1 >> 16) |
6418         (entry_2 & 0xff000000) |
6419         ((entry_2 & 0xff) << 16);
6420     target_ldt_info->base_addr = tswapal(base_addr);
6421     target_ldt_info->limit = tswap32(limit);
6422     target_ldt_info->flags = tswap32(flags);
6423     unlock_user_struct(target_ldt_info, ptr, 1);
6424     return 0;
6425 }
6426 #endif /* TARGET_I386 && TARGET_ABI32 */
6427 
6428 #ifndef TARGET_ABI32
6429 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6430 {
6431     abi_long ret = 0;
6432     abi_ulong val;
6433     int idx;
6434 
6435     switch(code) {
6436     case TARGET_ARCH_SET_GS:
6437     case TARGET_ARCH_SET_FS:
6438         if (code == TARGET_ARCH_SET_GS)
6439             idx = R_GS;
6440         else
6441             idx = R_FS;
6442         cpu_x86_load_seg(env, idx, 0);
6443         env->segs[idx].base = addr;
6444         break;
6445     case TARGET_ARCH_GET_GS:
6446     case TARGET_ARCH_GET_FS:
6447         if (code == TARGET_ARCH_GET_GS)
6448             idx = R_GS;
6449         else
6450             idx = R_FS;
6451         val = env->segs[idx].base;
6452         if (put_user(val, addr, abi_ulong))
6453             ret = -TARGET_EFAULT;
6454         break;
6455     default:
6456         ret = -TARGET_EINVAL;
6457         break;
6458     }
6459     return ret;
6460 }
6461 #endif
6462 
6463 #endif /* defined(TARGET_I386) */
6464 
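/* Host stack size for the pthread that backs a guest clone(CLONE_VM). */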
6465 #define NEW_STACK_SIZE 0x40000
6466 
6467 
6468 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6469 typedef struct {
6470     CPUArchState *env;
6471     pthread_mutex_t mutex;
6472     pthread_cond_t cond;
6473     pthread_t thread;
6474     uint32_t tid;
6475     abi_ulong child_tidptr;
6476     abi_ulong parent_tidptr;
6477     sigset_t sigmask;
6478 } new_thread_info;
6479 
6480 static void *clone_func(void *arg)
6481 {
6482     new_thread_info *info = arg;
6483     CPUArchState *env;
6484     CPUState *cpu;
6485     TaskState *ts;
6486 
6487     rcu_register_thread();
6488     tcg_register_thread();
6489     env = info->env;
6490     cpu = ENV_GET_CPU(env);
6491     thread_cpu = cpu;
6492     ts = (TaskState *)cpu->opaque;
6493     info->tid = gettid();
6494     task_settid(ts);
6495     if (info->child_tidptr)
6496         put_user_u32(info->tid, info->child_tidptr);
6497     if (info->parent_tidptr)
6498         put_user_u32(info->tid, info->parent_tidptr);
6499     /* Enable signals.  */
6500     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6501     /* Signal to the parent that we're ready.  */
6502     pthread_mutex_lock(&info->mutex);
6503     pthread_cond_broadcast(&info->cond);
6504     pthread_mutex_unlock(&info->mutex);
6505     /* Wait until the parent has finished initializing the tls state.  */
6506     pthread_mutex_lock(&clone_lock);
6507     pthread_mutex_unlock(&clone_lock);
6508     cpu_loop(env);
6509     /* never exits */
6510     return NULL;
6511 }
6512 
6513 /* do_fork() must return host values and target errnos (unlike most
6514    do_*() functions). */
6515 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6516                    abi_ulong parent_tidptr, target_ulong newtls,
6517                    abi_ulong child_tidptr)
6518 {
6519     CPUState *cpu = ENV_GET_CPU(env);
6520     int ret;
6521     TaskState *ts;
6522     CPUState *new_cpu;
6523     CPUArchState *new_env;
6524     sigset_t sigmask;
6525 
6526     flags &= ~CLONE_IGNORED_FLAGS;
6527 
6528     /* Emulate vfork() with fork() */
6529     if (flags & CLONE_VFORK)
6530         flags &= ~(CLONE_VFORK | CLONE_VM);
6531 
6532     if (flags & CLONE_VM) {
6533         TaskState *parent_ts = (TaskState *)cpu->opaque;
6534         new_thread_info info;
6535         pthread_attr_t attr;
6536 
6537         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6538             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6539             return -TARGET_EINVAL;
6540         }
6541 
6542         ts = g_new0(TaskState, 1);
6543         init_task_state(ts);
6544 
6545         /* Grab a mutex so that thread setup appears atomic.  */
6546         pthread_mutex_lock(&clone_lock);
6547 
6548         /* we create a new CPU instance. */
6549         new_env = cpu_copy(env);
6550         /* Init regs that differ from the parent.  */
6551         cpu_clone_regs(new_env, newsp);
6552         new_cpu = ENV_GET_CPU(new_env);
6553         new_cpu->opaque = ts;
6554         ts->bprm = parent_ts->bprm;
6555         ts->info = parent_ts->info;
6556         ts->signal_mask = parent_ts->signal_mask;
6557 
6558         if (flags & CLONE_CHILD_CLEARTID) {
6559             ts->child_tidptr = child_tidptr;
6560         }
6561 
6562         if (flags & CLONE_SETTLS) {
6563             cpu_set_tls (new_env, newtls);
6564         }
6565 
6566         memset(&info, 0, sizeof(info));
6567         pthread_mutex_init(&info.mutex, NULL);
6568         pthread_mutex_lock(&info.mutex);
6569         pthread_cond_init(&info.cond, NULL);
6570         info.env = new_env;
6571         if (flags & CLONE_CHILD_SETTID) {
6572             info.child_tidptr = child_tidptr;
6573         }
6574         if (flags & CLONE_PARENT_SETTID) {
6575             info.parent_tidptr = parent_tidptr;
6576         }
6577 
6578         ret = pthread_attr_init(&attr);
6579         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6580         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6581         /* It is not safe to deliver signals until the child has finished
6582            initializing, so temporarily block all signals.  */
6583         sigfillset(&sigmask);
6584         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6585 
6586         /* If this is our first additional thread, we need to ensure we
6587          * generate code for parallel execution and flush old translations.
6588          */
6589         if (!parallel_cpus) {
6590             parallel_cpus = true;
6591             tb_flush(cpu);
6592         }
6593 
6594         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6595         /* TODO: Free new CPU state if thread creation failed.  */
6596 
6597         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6598         pthread_attr_destroy(&attr);
6599         if (ret == 0) {
6600             /* Wait for the child to initialize.  */
6601             pthread_cond_wait(&info.cond, &info.mutex);
6602             ret = info.tid;
6603         } else {
6604             ret = -1;
6605         }
6606         pthread_mutex_unlock(&info.mutex);
6607         pthread_cond_destroy(&info.cond);
6608         pthread_mutex_destroy(&info.mutex);
6609         pthread_mutex_unlock(&clone_lock);
6610     } else {
6611         /* if CLONE_VM is not set, we treat it as a fork */
6612         if (flags & CLONE_INVALID_FORK_FLAGS) {
6613             return -TARGET_EINVAL;
6614         }
6615 
6616         /* We can't support custom termination signals */
6617         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6618             return -TARGET_EINVAL;
6619         }
6620 
6621         if (block_signals()) {
6622             return -TARGET_ERESTARTSYS;
6623         }
6624 
6625         fork_start();
6626         ret = fork();
6627         if (ret == 0) {
6628             /* Child Process.  */
6629             cpu_clone_regs(env, newsp);
6630             fork_end(1);
6631             /* There is a race condition here.  The parent process could
6632                theoretically read the TID in the child process before the child
6633                tid is set.  This would require using either ptrace
6634                (not implemented) or having *_tidptr point at a shared memory
6635                mapping.  We can't repeat the spinlock hack used above because
6636                the child process gets its own copy of the lock.  */
6637             if (flags & CLONE_CHILD_SETTID)
6638                 put_user_u32(gettid(), child_tidptr);
6639             if (flags & CLONE_PARENT_SETTID)
6640                 put_user_u32(gettid(), parent_tidptr);
6641             ts = (TaskState *)cpu->opaque;
6642             if (flags & CLONE_SETTLS)
6643                 cpu_set_tls (env, newtls);
6644             if (flags & CLONE_CHILD_CLEARTID)
6645                 ts->child_tidptr = child_tidptr;
6646         } else {
6647             fork_end(0);
6648         }
6649     }
6650     return ret;
6651 }
6652 
6653 /* warning: doesn't handle Linux-specific flags... */
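/* The target locking commands are mapped to their 64-bit host
 * counterparts because do_fcntl() always uses a host struct flock64. */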
6654 static int target_to_host_fcntl_cmd(int cmd)
6655 {
6656     int ret;
6657 
6658     switch(cmd) {
6659     case TARGET_F_DUPFD:
6660     case TARGET_F_GETFD:
6661     case TARGET_F_SETFD:
6662     case TARGET_F_GETFL:
6663     case TARGET_F_SETFL:
6664         ret = cmd;
6665         break;
6666     case TARGET_F_GETLK:
6667         ret = F_GETLK64;
6668         break;
6669     case TARGET_F_SETLK:
6670         ret = F_SETLK64;
6671         break;
6672     case TARGET_F_SETLKW:
6673         ret = F_SETLKW64;
6674         break;
6675     case TARGET_F_GETOWN:
6676         ret = F_GETOWN;
6677         break;
6678     case TARGET_F_SETOWN:
6679         ret = F_SETOWN;
6680         break;
6681     case TARGET_F_GETSIG:
6682         ret = F_GETSIG;
6683         break;
6684     case TARGET_F_SETSIG:
6685         ret = F_SETSIG;
6686         break;
6687 #if TARGET_ABI_BITS == 32
6688     case TARGET_F_GETLK64:
6689         ret = F_GETLK64;
6690         break;
6691     case TARGET_F_SETLK64:
6692         ret = F_SETLK64;
6693         break;
6694     case TARGET_F_SETLKW64:
6695         ret = F_SETLKW64;
6696         break;
6697 #endif
6698     case TARGET_F_SETLEASE:
6699         ret = F_SETLEASE;
6700         break;
6701     case TARGET_F_GETLEASE:
6702         ret = F_GETLEASE;
6703         break;
6704 #ifdef F_DUPFD_CLOEXEC
6705     case TARGET_F_DUPFD_CLOEXEC:
6706         ret = F_DUPFD_CLOEXEC;
6707         break;
6708 #endif
6709     case TARGET_F_NOTIFY:
6710         ret = F_NOTIFY;
6711         break;
6712 #ifdef F_GETOWN_EX
6713     case TARGET_F_GETOWN_EX:
6714         ret = F_GETOWN_EX;
6715         break;
6716 #endif
6717 #ifdef F_SETOWN_EX
6718     case TARGET_F_SETOWN_EX:
6719         ret = F_SETOWN_EX;
6720         break;
6721 #endif
6722 #ifdef F_SETPIPE_SZ
6723     case TARGET_F_SETPIPE_SZ:
6724         ret = F_SETPIPE_SZ;
6725         break;
6726     case TARGET_F_GETPIPE_SZ:
6727         ret = F_GETPIPE_SZ;
6728         break;
6729 #endif
6730     default:
6731         ret = -TARGET_EINVAL;
6732         break;
6733     }
6734 
6735 #if defined(__powerpc64__)
6736     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which the
6737      * kernel does not support. The glibc fcntl() wrapper adjusts them to 5,
6738      * 6 and 7 before making the syscall. Since we make the syscall directly,
6739      * adjust them to what the kernel supports.
6740      */
6741     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6742         ret -= F_GETLK64 - 5;
6743     }
6744 #endif
6745 
6746     return ret;
6747 }
6748 
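/* Translate flock lock types between target and host.  FLOCK_TRANSTBL
 * expands to the body of a switch statement; TRANSTBL_CONVERT is
 * redefined before each expansion to pick the direction, e.g. it becomes
 * "case TARGET_F_RDLCK: return F_RDLCK;" for the target-to-host case. */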
6749 #define FLOCK_TRANSTBL \
6750     switch (type) { \
6751     TRANSTBL_CONVERT(F_RDLCK); \
6752     TRANSTBL_CONVERT(F_WRLCK); \
6753     TRANSTBL_CONVERT(F_UNLCK); \
6754     TRANSTBL_CONVERT(F_EXLCK); \
6755     TRANSTBL_CONVERT(F_SHLCK); \
6756     }
6757 
6758 static int target_to_host_flock(int type)
6759 {
6760 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6761     FLOCK_TRANSTBL
6762 #undef  TRANSTBL_CONVERT
6763     return -TARGET_EINVAL;
6764 }
6765 
6766 static int host_to_target_flock(int type)
6767 {
6768 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6769     FLOCK_TRANSTBL
6770 #undef  TRANSTBL_CONVERT
6771     /* if we don't know how to convert the value coming
6772      * from the host, copy it to the target field as-is
6773      */
6774     return type;
6775 }
6776 
6777 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6778                                             abi_ulong target_flock_addr)
6779 {
6780     struct target_flock *target_fl;
6781     int l_type;
6782 
6783     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6784         return -TARGET_EFAULT;
6785     }
6786 
6787     __get_user(l_type, &target_fl->l_type);
6788     l_type = target_to_host_flock(l_type);
6789     if (l_type < 0) {
6790         return l_type;
6791     }
6792     fl->l_type = l_type;
6793     __get_user(fl->l_whence, &target_fl->l_whence);
6794     __get_user(fl->l_start, &target_fl->l_start);
6795     __get_user(fl->l_len, &target_fl->l_len);
6796     __get_user(fl->l_pid, &target_fl->l_pid);
6797     unlock_user_struct(target_fl, target_flock_addr, 0);
6798     return 0;
6799 }
6800 
6801 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6802                                           const struct flock64 *fl)
6803 {
6804     struct target_flock *target_fl;
6805     short l_type;
6806 
6807     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6808         return -TARGET_EFAULT;
6809     }
6810 
6811     l_type = host_to_target_flock(fl->l_type);
6812     __put_user(l_type, &target_fl->l_type);
6813     __put_user(fl->l_whence, &target_fl->l_whence);
6814     __put_user(fl->l_start, &target_fl->l_start);
6815     __put_user(fl->l_len, &target_fl->l_len);
6816     __put_user(fl->l_pid, &target_fl->l_pid);
6817     unlock_user_struct(target_fl, target_flock_addr, 1);
6818     return 0;
6819 }
6820 
6821 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6822 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6823 
6824 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6825 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6826                                                    abi_ulong target_flock_addr)
6827 {
6828     struct target_oabi_flock64 *target_fl;
6829     int l_type;
6830 
6831     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6832         return -TARGET_EFAULT;
6833     }
6834 
6835     __get_user(l_type, &target_fl->l_type);
6836     l_type = target_to_host_flock(l_type);
6837     if (l_type < 0) {
6838         return l_type;
6839     }
6840     fl->l_type = l_type;
6841     __get_user(fl->l_whence, &target_fl->l_whence);
6842     __get_user(fl->l_start, &target_fl->l_start);
6843     __get_user(fl->l_len, &target_fl->l_len);
6844     __get_user(fl->l_pid, &target_fl->l_pid);
6845     unlock_user_struct(target_fl, target_flock_addr, 0);
6846     return 0;
6847 }
6848 
6849 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6850                                                  const struct flock64 *fl)
6851 {
6852     struct target_oabi_flock64 *target_fl;
6853     short l_type;
6854 
6855     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6856         return -TARGET_EFAULT;
6857     }
6858 
6859     l_type = host_to_target_flock(fl->l_type);
6860     __put_user(l_type, &target_fl->l_type);
6861     __put_user(fl->l_whence, &target_fl->l_whence);
6862     __put_user(fl->l_start, &target_fl->l_start);
6863     __put_user(fl->l_len, &target_fl->l_len);
6864     __put_user(fl->l_pid, &target_fl->l_pid);
6865     unlock_user_struct(target_fl, target_flock_addr, 1);
6866     return 0;
6867 }
6868 #endif
6869 
6870 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6871                                               abi_ulong target_flock_addr)
6872 {
6873     struct target_flock64 *target_fl;
6874     int l_type;
6875 
6876     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6877         return -TARGET_EFAULT;
6878     }
6879 
6880     __get_user(l_type, &target_fl->l_type);
6881     l_type = target_to_host_flock(l_type);
6882     if (l_type < 0) {
6883         return l_type;
6884     }
6885     fl->l_type = l_type;
6886     __get_user(fl->l_whence, &target_fl->l_whence);
6887     __get_user(fl->l_start, &target_fl->l_start);
6888     __get_user(fl->l_len, &target_fl->l_len);
6889     __get_user(fl->l_pid, &target_fl->l_pid);
6890     unlock_user_struct(target_fl, target_flock_addr, 0);
6891     return 0;
6892 }
6893 
6894 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6895                                             const struct flock64 *fl)
6896 {
6897     struct target_flock64 *target_fl;
6898     short l_type;
6899 
6900     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6901         return -TARGET_EFAULT;
6902     }
6903 
6904     l_type = host_to_target_flock(fl->l_type);
6905     __put_user(l_type, &target_fl->l_type);
6906     __put_user(fl->l_whence, &target_fl->l_whence);
6907     __put_user(fl->l_start, &target_fl->l_start);
6908     __put_user(fl->l_len, &target_fl->l_len);
6909     __put_user(fl->l_pid, &target_fl->l_pid);
6910     unlock_user_struct(target_fl, target_flock_addr, 1);
6911     return 0;
6912 }
6913 
6914 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6915 {
6916     struct flock64 fl64;
6917 #ifdef F_GETOWN_EX
6918     struct f_owner_ex fox;
6919     struct target_f_owner_ex *target_fox;
6920 #endif
6921     abi_long ret;
6922     int host_cmd = target_to_host_fcntl_cmd(cmd);
6923 
6924     if (host_cmd == -TARGET_EINVAL)
6925         return host_cmd;
6926 
6927     switch(cmd) {
6928     case TARGET_F_GETLK:
6929         ret = copy_from_user_flock(&fl64, arg);
6930         if (ret) {
6931             return ret;
6932         }
6933         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6934         if (ret == 0) {
6935             ret = copy_to_user_flock(arg, &fl64);
6936         }
6937         break;
6938 
6939     case TARGET_F_SETLK:
6940     case TARGET_F_SETLKW:
6941         ret = copy_from_user_flock(&fl64, arg);
6942         if (ret) {
6943             return ret;
6944         }
6945         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6946         break;
6947 
6948     case TARGET_F_GETLK64:
6949         ret = copy_from_user_flock64(&fl64, arg);
6950         if (ret) {
6951             return ret;
6952         }
6953         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6954         if (ret == 0) {
6955             ret = copy_to_user_flock64(arg, &fl64);
6956         }
6957         break;
6958     case TARGET_F_SETLK64:
6959     case TARGET_F_SETLKW64:
6960         ret = copy_from_user_flock64(&fl64, arg);
6961         if (ret) {
6962             return ret;
6963         }
6964         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6965         break;
6966 
6967     case TARGET_F_GETFL:
6968         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6969         if (ret >= 0) {
6970             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6971         }
6972         break;
6973 
6974     case TARGET_F_SETFL:
6975         ret = get_errno(safe_fcntl(fd, host_cmd,
6976                                    target_to_host_bitmask(arg,
6977                                                           fcntl_flags_tbl)));
6978         break;
6979 
6980 #ifdef F_GETOWN_EX
6981     case TARGET_F_GETOWN_EX:
6982         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6983         if (ret >= 0) {
6984             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6985                 return -TARGET_EFAULT;
6986             target_fox->type = tswap32(fox.type);
6987             target_fox->pid = tswap32(fox.pid);
6988             unlock_user_struct(target_fox, arg, 1);
6989         }
6990         break;
6991 #endif
6992 
6993 #ifdef F_SETOWN_EX
6994     case TARGET_F_SETOWN_EX:
6995         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6996             return -TARGET_EFAULT;
6997         fox.type = tswap32(target_fox->type);
6998         fox.pid = tswap32(target_fox->pid);
6999         unlock_user_struct(target_fox, arg, 0);
7000         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7001         break;
7002 #endif
7003 
7004     case TARGET_F_SETOWN:
7005     case TARGET_F_GETOWN:
7006     case TARGET_F_SETSIG:
7007     case TARGET_F_GETSIG:
7008     case TARGET_F_SETLEASE:
7009     case TARGET_F_GETLEASE:
7010     case TARGET_F_SETPIPE_SZ:
7011     case TARGET_F_GETPIPE_SZ:
7012         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7013         break;
7014 
7015     default:
7016         ret = get_errno(safe_fcntl(fd, cmd, arg));
7017         break;
7018     }
7019     return ret;
7020 }
7021 
7022 #ifdef USE_UID16
7023 
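/* Helpers for the legacy 16-bit UID/GID ABI: IDs above 65535 are clamped
 * to the overflow ID 65534 when narrowing, and a 16-bit value of -1
 * ("no change") is widened back to -1 rather than to 65535. */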
7024 static inline int high2lowuid(int uid)
7025 {
7026     if (uid > 65535)
7027         return 65534;
7028     else
7029         return uid;
7030 }
7031 
7032 static inline int high2lowgid(int gid)
7033 {
7034     if (gid > 65535)
7035         return 65534;
7036     else
7037         return gid;
7038 }
7039 
7040 static inline int low2highuid(int uid)
7041 {
7042     if ((int16_t)uid == -1)
7043         return -1;
7044     else
7045         return uid;
7046 }
7047 
7048 static inline int low2highgid(int gid)
7049 {
7050     if ((int16_t)gid == -1)
7051         return -1;
7052     else
7053         return gid;
7054 }
7055 static inline int tswapid(int id)
7056 {
7057     return tswap16(id);
7058 }
7059 
7060 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7061 
7062 #else /* !USE_UID16 */
7063 static inline int high2lowuid(int uid)
7064 {
7065     return uid;
7066 }
7067 static inline int high2lowgid(int gid)
7068 {
7069     return gid;
7070 }
7071 static inline int low2highuid(int uid)
7072 {
7073     return uid;
7074 }
7075 static inline int low2highgid(int gid)
7076 {
7077     return gid;
7078 }
7079 static inline int tswapid(int id)
7080 {
7081     return tswap32(id);
7082 }
7083 
7084 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7085 
7086 #endif /* USE_UID16 */
7087 
7088 /* We must do direct syscalls for setting UID/GID, because we want to
7089  * implement the Linux system call semantics of "change only for this thread",
7090  * not the libc/POSIX semantics of "change for all threads in process".
7091  * (See http://ewontfix.com/17/ for more details.)
7092  * We use the 32-bit version of the syscalls if present; if it is not
7093  * then either the host architecture supports 32-bit UIDs natively with
7094  * the standard syscall, or the 16-bit UID is the best we can do.
7095  */
7096 #ifdef __NR_setuid32
7097 #define __NR_sys_setuid __NR_setuid32
7098 #else
7099 #define __NR_sys_setuid __NR_setuid
7100 #endif
7101 #ifdef __NR_setgid32
7102 #define __NR_sys_setgid __NR_setgid32
7103 #else
7104 #define __NR_sys_setgid __NR_setgid
7105 #endif
7106 #ifdef __NR_setresuid32
7107 #define __NR_sys_setresuid __NR_setresuid32
7108 #else
7109 #define __NR_sys_setresuid __NR_setresuid
7110 #endif
7111 #ifdef __NR_setresgid32
7112 #define __NR_sys_setresgid __NR_setresgid32
7113 #else
7114 #define __NR_sys_setresgid __NR_setresgid
7115 #endif
7116 
7117 _syscall1(int, sys_setuid, uid_t, uid)
7118 _syscall1(int, sys_setgid, gid_t, gid)
7119 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7120 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7121 
7122 void syscall_init(void)
7123 {
7124     IOCTLEntry *ie;
7125     const argtype *arg_type;
7126     int size;
7127     int i;
7128 
7129     thunk_init(STRUCT_MAX);
7130 
7131 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7132 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7133 #include "syscall_types.h"
7134 #undef STRUCT
7135 #undef STRUCT_SPECIAL
7136 
7137     /* Build target_to_host_errno_table[] from
7138      * host_to_target_errno_table[]. */
7139     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7140         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7141     }
7142 
7143     /* We patch the ioctl size if necessary. We rely on the fact that
7144        no ioctl has all bits set to '1' in the size field. */
7145     ie = ioctl_entries;
7146     while (ie->target_cmd != 0) {
7147         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7148             TARGET_IOC_SIZEMASK) {
7149             arg_type = ie->arg_type;
7150             if (arg_type[0] != TYPE_PTR) {
7151                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7152                         ie->target_cmd);
7153                 exit(1);
7154             }
7155             arg_type++;
7156             size = thunk_type_size(arg_type, 0);
7157             ie->target_cmd = (ie->target_cmd &
7158                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7159                 (size << TARGET_IOC_SIZESHIFT);
7160         }
7161 
7162         /* automatic consistency check if same arch */
7163 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7164     (defined(__x86_64__) && defined(TARGET_X86_64))
7165         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7166             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7167                     ie->name, ie->target_cmd, ie->host_cmd);
7168         }
7169 #endif
7170         ie++;
7171     }
7172 }
7173 
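/* On 32-bit ABIs a 64-bit file offset is passed as two 32-bit syscall
 * arguments; which word holds the high half depends on the target's
 * endianness.  On 64-bit ABIs the offset already fits in one argument. */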
7174 #if TARGET_ABI_BITS == 32
7175 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
7176 {
7177 #ifdef TARGET_WORDS_BIGENDIAN
7178     return ((uint64_t)word0 << 32) | word1;
7179 #else
7180     return ((uint64_t)word1 << 32) | word0;
7181 #endif
7182 }
7183 #else /* TARGET_ABI_BITS == 32 */
7184 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
7185 {
7186     return word0;
7187 }
7188 #endif /* TARGET_ABI_BITS != 32 */
7189 
7190 #ifdef TARGET_NR_truncate64
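/* Some 32-bit ABIs pass 64-bit syscall arguments in aligned (even/odd)
 * register pairs; when regpairs_aligned() reports that, the offset words
 * are actually in arg3/arg4 and arg2 is just alignment padding. */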
7191 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7192                                          abi_long arg2,
7193                                          abi_long arg3,
7194                                          abi_long arg4)
7195 {
7196     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7197         arg2 = arg3;
7198         arg3 = arg4;
7199     }
7200     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7201 }
7202 #endif
7203 
7204 #ifdef TARGET_NR_ftruncate64
7205 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7206                                           abi_long arg2,
7207                                           abi_long arg3,
7208                                           abi_long arg4)
7209 {
7210     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7211         arg2 = arg3;
7212         arg3 = arg4;
7213     }
7214     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7215 }
7216 #endif
7217 
7218 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7219                                                abi_ulong target_addr)
7220 {
7221     struct target_timespec *target_ts;
7222 
7223     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7224         return -TARGET_EFAULT;
7225     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7226     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7227     unlock_user_struct(target_ts, target_addr, 0);
7228     return 0;
7229 }
7230 
7231 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7232                                                struct timespec *host_ts)
7233 {
7234     struct target_timespec *target_ts;
7235 
7236     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7237         return -TARGET_EFAULT;
7238     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7239     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7240     unlock_user_struct(target_ts, target_addr, 1);
7241     return 0;
7242 }
7243 
7244 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7245                                                  abi_ulong target_addr)
7246 {
7247     struct target_itimerspec *target_itspec;
7248 
7249     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7250         return -TARGET_EFAULT;
7251     }
7252 
7253     host_itspec->it_interval.tv_sec =
7254                             tswapal(target_itspec->it_interval.tv_sec);
7255     host_itspec->it_interval.tv_nsec =
7256                             tswapal(target_itspec->it_interval.tv_nsec);
7257     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7258     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7259 
7260     unlock_user_struct(target_itspec, target_addr, 1);
7261     return 0;
7262 }
7263 
7264 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7265                                                struct itimerspec *host_its)
7266 {
7267     struct target_itimerspec *target_itspec;
7268 
7269     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7270         return -TARGET_EFAULT;
7271     }
7272 
7273     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7274     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7275 
7276     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7277     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7278 
7279     unlock_user_struct(target_itspec, target_addr, 0);
7280     return 0;
7281 }
7282 
7283 static inline abi_long target_to_host_timex(struct timex *host_tx,
7284                                             abi_long target_addr)
7285 {
7286     struct target_timex *target_tx;
7287 
7288     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7289         return -TARGET_EFAULT;
7290     }
7291 
7292     __get_user(host_tx->modes, &target_tx->modes);
7293     __get_user(host_tx->offset, &target_tx->offset);
7294     __get_user(host_tx->freq, &target_tx->freq);
7295     __get_user(host_tx->maxerror, &target_tx->maxerror);
7296     __get_user(host_tx->esterror, &target_tx->esterror);
7297     __get_user(host_tx->status, &target_tx->status);
7298     __get_user(host_tx->constant, &target_tx->constant);
7299     __get_user(host_tx->precision, &target_tx->precision);
7300     __get_user(host_tx->tolerance, &target_tx->tolerance);
7301     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7302     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7303     __get_user(host_tx->tick, &target_tx->tick);
7304     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7305     __get_user(host_tx->jitter, &target_tx->jitter);
7306     __get_user(host_tx->shift, &target_tx->shift);
7307     __get_user(host_tx->stabil, &target_tx->stabil);
7308     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7309     __get_user(host_tx->calcnt, &target_tx->calcnt);
7310     __get_user(host_tx->errcnt, &target_tx->errcnt);
7311     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7312     __get_user(host_tx->tai, &target_tx->tai);
7313 
7314     unlock_user_struct(target_tx, target_addr, 0);
7315     return 0;
7316 }
7317 
7318 static inline abi_long host_to_target_timex(abi_long target_addr,
7319                                             struct timex *host_tx)
7320 {
7321     struct target_timex *target_tx;
7322 
7323     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7324         return -TARGET_EFAULT;
7325     }
7326 
7327     __put_user(host_tx->modes, &target_tx->modes);
7328     __put_user(host_tx->offset, &target_tx->offset);
7329     __put_user(host_tx->freq, &target_tx->freq);
7330     __put_user(host_tx->maxerror, &target_tx->maxerror);
7331     __put_user(host_tx->esterror, &target_tx->esterror);
7332     __put_user(host_tx->status, &target_tx->status);
7333     __put_user(host_tx->constant, &target_tx->constant);
7334     __put_user(host_tx->precision, &target_tx->precision);
7335     __put_user(host_tx->tolerance, &target_tx->tolerance);
7336     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7337     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7338     __put_user(host_tx->tick, &target_tx->tick);
7339     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7340     __put_user(host_tx->jitter, &target_tx->jitter);
7341     __put_user(host_tx->shift, &target_tx->shift);
7342     __put_user(host_tx->stabil, &target_tx->stabil);
7343     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7344     __put_user(host_tx->calcnt, &target_tx->calcnt);
7345     __put_user(host_tx->errcnt, &target_tx->errcnt);
7346     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7347     __put_user(host_tx->tai, &target_tx->tai);
7348 
7349     unlock_user_struct(target_tx, target_addr, 1);
7350     return 0;
7351 }
7352 
7353 
7354 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7355                                                abi_ulong target_addr)
7356 {
7357     struct target_sigevent *target_sevp;
7358 
7359     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7360         return -TARGET_EFAULT;
7361     }
7362 
7363     /* This union is awkward on 64 bit systems because it has a 32 bit
7364      * integer and a pointer in it; we follow the conversion approach
7365      * used for handling sigval types in signal.c so the guest should get
7366      * the correct value back even if we did a 64 bit byteswap and it's
7367      * using the 32 bit integer.
7368      */
7369     host_sevp->sigev_value.sival_ptr =
7370         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7371     host_sevp->sigev_signo =
7372         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7373     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7374     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7375 
7376     unlock_user_struct(target_sevp, target_addr, 1);
7377     return 0;
7378 }
7379 
7380 #if defined(TARGET_NR_mlockall)
7381 static inline int target_to_host_mlockall_arg(int arg)
7382 {
7383     int result = 0;
7384 
7385     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7386         result |= MCL_CURRENT;
7387     }
7388     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7389         result |= MCL_FUTURE;
7390     }
7391     return result;
7392 }
7393 #endif
7394 
7395 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7396      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7397      defined(TARGET_NR_newfstatat))
7398 static inline abi_long host_to_target_stat64(void *cpu_env,
7399                                              abi_ulong target_addr,
7400                                              struct stat *host_st)
7401 {
7402 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7403     if (((CPUARMState *)cpu_env)->eabi) {
7404         struct target_eabi_stat64 *target_st;
7405 
7406         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7407             return -TARGET_EFAULT;
7408         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7409         __put_user(host_st->st_dev, &target_st->st_dev);
7410         __put_user(host_st->st_ino, &target_st->st_ino);
7411 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7412         __put_user(host_st->st_ino, &target_st->__st_ino);
7413 #endif
7414         __put_user(host_st->st_mode, &target_st->st_mode);
7415         __put_user(host_st->st_nlink, &target_st->st_nlink);
7416         __put_user(host_st->st_uid, &target_st->st_uid);
7417         __put_user(host_st->st_gid, &target_st->st_gid);
7418         __put_user(host_st->st_rdev, &target_st->st_rdev);
7419         __put_user(host_st->st_size, &target_st->st_size);
7420         __put_user(host_st->st_blksize, &target_st->st_blksize);
7421         __put_user(host_st->st_blocks, &target_st->st_blocks);
7422         __put_user(host_st->st_atime, &target_st->target_st_atime);
7423         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7424         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7425         unlock_user_struct(target_st, target_addr, 1);
7426     } else
7427 #endif
7428     {
7429 #if defined(TARGET_HAS_STRUCT_STAT64)
7430         struct target_stat64 *target_st;
7431 #else
7432         struct target_stat *target_st;
7433 #endif
7434 
7435         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7436             return -TARGET_EFAULT;
7437         memset(target_st, 0, sizeof(*target_st));
7438         __put_user(host_st->st_dev, &target_st->st_dev);
7439         __put_user(host_st->st_ino, &target_st->st_ino);
7440 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7441         __put_user(host_st->st_ino, &target_st->__st_ino);
7442 #endif
7443         __put_user(host_st->st_mode, &target_st->st_mode);
7444         __put_user(host_st->st_nlink, &target_st->st_nlink);
7445         __put_user(host_st->st_uid, &target_st->st_uid);
7446         __put_user(host_st->st_gid, &target_st->st_gid);
7447         __put_user(host_st->st_rdev, &target_st->st_rdev);
7448         /* XXX: better use of kernel struct */
7449         __put_user(host_st->st_size, &target_st->st_size);
7450         __put_user(host_st->st_blksize, &target_st->st_blksize);
7451         __put_user(host_st->st_blocks, &target_st->st_blocks);
7452         __put_user(host_st->st_atime, &target_st->target_st_atime);
7453         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7454         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7455         unlock_user_struct(target_st, target_addr, 1);
7456     }
7457 
7458     return 0;
7459 }
7460 #endif
7461 
7462 /* ??? Using host futex calls even when target atomic operations
7463    are not really atomic probably breaks things.  However, implementing
7464    futexes locally would make futexes shared between multiple processes
7465    tricky.  Then again, they're probably useless in that case because
7466    guest atomic operations won't work either.  */
7467 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7468                     target_ulong uaddr2, int val3)
7469 {
7470     struct timespec ts, *pts;
7471     int base_op;
7472 
7473     /* ??? We assume FUTEX_* constants are the same on both host
7474        and target.  */
7475 #ifdef FUTEX_CMD_MASK
7476     base_op = op & FUTEX_CMD_MASK;
7477 #else
7478     base_op = op;
7479 #endif
7480     switch (base_op) {
7481     case FUTEX_WAIT:
7482     case FUTEX_WAIT_BITSET:
7483         if (timeout) {
7484             pts = &ts;
7485             target_to_host_timespec(pts, timeout);
7486         } else {
7487             pts = NULL;
7488         }
7489         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7490                          pts, NULL, val3));
7491     case FUTEX_WAKE:
7492         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7493     case FUTEX_FD:
7494         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7495     case FUTEX_REQUEUE:
7496     case FUTEX_CMP_REQUEUE:
7497     case FUTEX_WAKE_OP:
7498         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7499            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7500            But the prototype takes a `struct timespec *'; insert casts
7501            to satisfy the compiler.  We do not need to tswap TIMEOUT
7502            since it's not compared to guest memory.  */
7503         pts = (struct timespec *)(uintptr_t) timeout;
7504         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7505                                     g2h(uaddr2),
7506                                     (base_op == FUTEX_CMP_REQUEUE
7507                                      ? tswap32(val3)
7508                                      : val3)));
7509     default:
7510         return -TARGET_ENOSYS;
7511     }
7512 }
7513 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7514 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7515                                      abi_long handle, abi_long mount_id,
7516                                      abi_long flags)
7517 {
7518     struct file_handle *target_fh;
7519     struct file_handle *fh;
7520     int mid = 0;
7521     abi_long ret;
7522     char *name;
7523     unsigned int size, total_size;
7524 
7525     if (get_user_s32(size, handle)) {
7526         return -TARGET_EFAULT;
7527     }
7528 
7529     name = lock_user_string(pathname);
7530     if (!name) {
7531         return -TARGET_EFAULT;
7532     }
7533 
7534     total_size = sizeof(struct file_handle) + size;
7535     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7536     if (!target_fh) {
7537         unlock_user(name, pathname, 0);
7538         return -TARGET_EFAULT;
7539     }
7540 
7541     fh = g_malloc0(total_size);
7542     fh->handle_bytes = size;
7543 
7544     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7545     unlock_user(name, pathname, 0);
7546 
7547     /* man name_to_handle_at(2):
7548      * Other than the use of the handle_bytes field, the caller should treat
7549      * the file_handle structure as an opaque data type
7550      */
7551 
7552     memcpy(target_fh, fh, total_size);
7553     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7554     target_fh->handle_type = tswap32(fh->handle_type);
7555     g_free(fh);
7556     unlock_user(target_fh, handle, total_size);
7557 
7558     if (put_user_s32(mid, mount_id)) {
7559         return -TARGET_EFAULT;
7560     }
7561 
7562     return ret;
7563 
7564 }
7565 #endif
7566 
7567 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7568 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7569                                      abi_long flags)
7570 {
7571     struct file_handle *target_fh;
7572     struct file_handle *fh;
7573     unsigned int size, total_size;
7574     abi_long ret;
7575 
7576     if (get_user_s32(size, handle)) {
7577         return -TARGET_EFAULT;
7578     }
7579 
7580     total_size = sizeof(struct file_handle) + size;
7581     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7582     if (!target_fh) {
7583         return -TARGET_EFAULT;
7584     }
7585 
7586     fh = g_memdup(target_fh, total_size);
7587     fh->handle_bytes = size;
7588     fh->handle_type = tswap32(target_fh->handle_type);
7589 
7590     ret = get_errno(open_by_handle_at(mount_fd, fh,
7591                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7592 
7593     g_free(fh);
7594 
7595     unlock_user(target_fh, handle, total_size);
7596 
7597     return ret;
7598 }
7599 #endif
7600 
7601 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7602 
7603 /* signalfd siginfo conversion */
7604 
7605 static void
7606 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7607                                 const struct signalfd_siginfo *info)
7608 {
7609     int sig = host_to_target_signal(info->ssi_signo);
7610 
7611     /* linux/signalfd.h defines an ssi_addr_lsb field that is
7612      * not defined in sys/signalfd.h but is used by some kernels
7613      */
7614 
7615 #ifdef BUS_MCEERR_AO
7616     if (tinfo->ssi_signo == SIGBUS &&
7617         (tinfo->ssi_code == BUS_MCEERR_AR ||
7618          tinfo->ssi_code == BUS_MCEERR_AO)) {
7619         uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7620         uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7621         *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7622     }
7623 #endif
7624 
7625     tinfo->ssi_signo = tswap32(sig);
7626     tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7627     tinfo->ssi_code = tswap32(info->ssi_code);
7628     tinfo->ssi_pid = tswap32(info->ssi_pid);
7629     tinfo->ssi_uid = tswap32(info->ssi_uid);
7630     tinfo->ssi_fd = tswap32(info->ssi_fd);
7631     tinfo->ssi_tid = tswap32(info->ssi_tid);
7632     tinfo->ssi_band = tswap32(info->ssi_band);
7633     tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7634     tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7635     tinfo->ssi_status = tswap32(info->ssi_status);
7636     tinfo->ssi_int = tswap32(info->ssi_int);
7637     tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7638     tinfo->ssi_utime = tswap64(info->ssi_utime);
7639     tinfo->ssi_stime = tswap64(info->ssi_stime);
7640     tinfo->ssi_addr = tswap64(info->ssi_addr);
7641 }
7642 
7643 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7644 {
7645     int i;
7646 
7647     for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7648         host_to_target_signalfd_siginfo(buf + i, buf + i);
7649     }
7650 
7651     return len;
7652 }
7653 
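/* Registered as the fd translator for signalfd descriptors so that data
 * read from them is converted into the guest's signalfd_siginfo layout. */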
7654 static TargetFdTrans target_signalfd_trans = {
7655     .host_to_target_data = host_to_target_data_signalfd,
7656 };
7657 
7658 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7659 {
7660     int host_flags;
7661     target_sigset_t *target_mask;
7662     sigset_t host_mask;
7663     abi_long ret;
7664 
7665     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7666         return -TARGET_EINVAL;
7667     }
7668     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7669         return -TARGET_EFAULT;
7670     }
7671 
7672     target_to_host_sigset(&host_mask, target_mask);
7673 
7674     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7675 
7676     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7677     if (ret >= 0) {
7678         fd_trans_register(ret, &target_signalfd_trans);
7679     }
7680 
7681     unlock_user_struct(target_mask, mask, 0);
7682 
7683     return ret;
7684 }
7685 #endif
7686 
7687 /* Map host to target signal numbers for the wait family of syscalls.
7688    Assume all other status bits are the same.  */
7689 int host_to_target_waitstatus(int status)
7690 {
7691     if (WIFSIGNALED(status)) {
7692         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7693     }
7694     if (WIFSTOPPED(status)) {
7695         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7696                | (status & 0xff);
7697     }
7698     return status;
7699 }
7700 
7701 static int open_self_cmdline(void *cpu_env, int fd)
7702 {
7703     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7704     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7705     int i;
7706 
7707     for (i = 0; i < bprm->argc; i++) {
7708         size_t len = strlen(bprm->argv[i]) + 1;
7709 
7710         if (write(fd, bprm->argv[i], len) != len) {
7711             return -1;
7712         }
7713     }
7714 
7715     return 0;
7716 }
7717 
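/* Synthesize the guest's view of /proc/self/maps: parse the host's map
 * entries, keep only the ranges that translate back into the guest
 * address space, and print them with guest addresses, tagging the guest
 * stack as [stack]. */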
7718 static int open_self_maps(void *cpu_env, int fd)
7719 {
7720     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7721     TaskState *ts = cpu->opaque;
7722     FILE *fp;
7723     char *line = NULL;
7724     size_t len = 0;
7725     ssize_t read;
7726 
7727     fp = fopen("/proc/self/maps", "r");
7728     if (fp == NULL) {
7729         return -1;
7730     }
7731 
7732     while ((read = getline(&line, &len, fp)) != -1) {
7733         int fields, dev_maj, dev_min, inode;
7734         uint64_t min, max, offset;
7735         char flag_r, flag_w, flag_x, flag_p;
7736         char path[512] = "";
7737         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7738                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7739                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7740 
7741         if ((fields < 10) || (fields > 11)) {
7742             continue;
7743         }
7744         if (h2g_valid(min)) {
7745             int flags = page_get_flags(h2g(min));
7746             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7747             if (page_check_range(h2g(min), max - min, flags) == -1) {
7748                 continue;
7749             }
7750             if (h2g(min) == ts->info->stack_limit) {
7751                 pstrcpy(path, sizeof(path), "      [stack]");
7752             }
7753             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7754                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7755                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7756                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7757                     path[0] ? "         " : "", path);
7758         }
7759     }
7760 
7761     free(line);
7762     fclose(fp);
7763 
7764     return 0;
7765 }
7766 
7767 static int open_self_stat(void *cpu_env, int fd)
7768 {
7769     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7770     TaskState *ts = cpu->opaque;
7771     abi_ulong start_stack = ts->info->start_stack;
7772     int i;
7773 
7774     for (i = 0; i < 44; i++) {
7775       char buf[128];
7776       int len;
7777       uint64_t val = 0;
7778 
7779       if (i == 0) {
7780         /* pid */
7781         val = getpid();
7782         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7783       } else if (i == 1) {
7784         /* app name */
7785         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7786       } else if (i == 27) {
7787         /* stack bottom */
7788         val = start_stack;
7789         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7790       } else {
7791         /* for the rest, there is MasterCard */
7792         /* the remaining fields are not emulated; report them as 0 */
7793       }
7794 
7795       len = strlen(buf);
7796       if (write(fd, buf, len) != len) {
7797           return -1;
7798       }
7799     }
7800 
7801     return 0;
7802 }
7803 
7804 static int open_self_auxv(void *cpu_env, int fd)
7805 {
7806     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7807     TaskState *ts = cpu->opaque;
7808     abi_ulong auxv = ts->info->saved_auxv;
7809     abi_ulong len = ts->info->auxv_len;
7810     char *ptr;
7811 
7812     /*
7813      * The auxiliary vector is stored on the target process stack.
7814      * Read the whole auxv vector and copy it to the file.
7815      */
7816     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7817     if (ptr != NULL) {
7818         while (len > 0) {
7819             ssize_t r;
7820             r = write(fd, ptr, len);
7821             if (r <= 0) {
7822                 break;
7823             }
7824             len -= r;
7825             ptr += r;
7826         }
7827         lseek(fd, 0, SEEK_SET);
7828         unlock_user(ptr, auxv, len);
7829     }
7830 
7831     return 0;
7832 }
7833 
7834 static int is_proc_myself(const char *filename, const char *entry)
7835 {
7836     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7837         filename += strlen("/proc/");
7838         if (!strncmp(filename, "self/", strlen("self/"))) {
7839             filename += strlen("self/");
7840         } else if (*filename >= '1' && *filename <= '9') {
7841             char myself[80];
7842             snprintf(myself, sizeof(myself), "%d/", getpid());
7843             if (!strncmp(filename, myself, strlen(myself))) {
7844                 filename += strlen(myself);
7845             } else {
7846                 return 0;
7847             }
7848         } else {
7849             return 0;
7850         }
7851         if (!strcmp(filename, entry)) {
7852             return 1;
7853         }
7854     }
7855     return 0;
7856 }
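/*
 * Example behaviour of is_proc_myself() (illustration only; the paths are
 * hypothetical):
 *
 *   is_proc_myself("/proc/self/exe", "exe")   -> 1
 *   is_proc_myself("/proc/1234/exe", "exe")   -> 1 only if getpid() == 1234
 *   is_proc_myself("/proc/version", "exe")    -> 0
 *   is_proc_myself("/etc/passwd", "exe")      -> 0
 */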
7857 
7858 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7859 static int is_proc(const char *filename, const char *entry)
7860 {
7861     return strcmp(filename, entry) == 0;
7862 }
7863 
7864 static int open_net_route(void *cpu_env, int fd)
7865 {
7866     FILE *fp;
7867     char *line = NULL;
7868     size_t len = 0;
7869     ssize_t read;
7870 
7871     fp = fopen("/proc/net/route", "r");
7872     if (fp == NULL) {
7873         return -1;
7874     }
7875 
7876     /* read header */
7877 
7878     read = getline(&line, &len, fp);
7879     dprintf(fd, "%s", line);
7880 
7881     /* read routes */
7882 
7883     while ((read = getline(&line, &len, fp)) != -1) {
7884         char iface[16];
7885         uint32_t dest, gw, mask;
7886         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7887         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7888                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7889                      &mask, &mtu, &window, &irtt);
7890         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7891                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7892                 metric, tswap32(mask), mtu, window, irtt);
7893     }
7894 
7895     free(line);
7896     fclose(fp);
7897 
7898     return 0;
7899 }
7900 #endif
7901 
7902 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7903 {
7904     struct fake_open {
7905         const char *filename;
7906         int (*fill)(void *cpu_env, int fd);
7907         int (*cmp)(const char *s1, const char *s2);
7908     };
7909     const struct fake_open *fake_open;
7910     static const struct fake_open fakes[] = {
7911         { "maps", open_self_maps, is_proc_myself },
7912         { "stat", open_self_stat, is_proc_myself },
7913         { "auxv", open_self_auxv, is_proc_myself },
7914         { "cmdline", open_self_cmdline, is_proc_myself },
7915 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7916         { "/proc/net/route", open_net_route, is_proc },
7917 #endif
7918         { NULL, NULL, NULL }
7919     };
7920 
7921     if (is_proc_myself(pathname, "exe")) {
7922         int execfd = qemu_getauxval(AT_EXECFD);
7923         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7924     }
7925 
7926     for (fake_open = fakes; fake_open->filename; fake_open++) {
7927         if (fake_open->cmp(pathname, fake_open->filename)) {
7928             break;
7929         }
7930     }
7931 
7932     if (fake_open->filename) {
7933         const char *tmpdir;
7934         char filename[PATH_MAX];
7935         int fd, r;
7936 
7937         /* create a temporary file to hold the emulated contents */
7938         tmpdir = getenv("TMPDIR");
7939         if (!tmpdir)
7940             tmpdir = "/tmp";
7941         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7942         fd = mkstemp(filename);
7943         if (fd < 0) {
7944             return fd;
7945         }
7946         unlink(filename);
7947 
7948         if ((r = fake_open->fill(cpu_env, fd))) {
7949             int e = errno;
7950             close(fd);
7951             errno = e;
7952             return r;
7953         }
7954         lseek(fd, 0, SEEK_SET);
7955 
7956         return fd;
7957     }
7958 
7959     return safe_openat(dirfd, path(pathname), flags, mode);
7960 }
7961 
7962 #define TIMER_MAGIC 0x0caf0000
7963 #define TIMER_MAGIC_MASK 0xffff0000
7964 
7965 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7966 static target_timer_t get_timer_id(abi_long arg)
7967 {
7968     target_timer_t timerid = arg;
7969 
7970     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7971         return -TARGET_EINVAL;
7972     }
7973 
7974     timerid &= 0xffff;
7975 
7976     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7977         return -TARGET_EINVAL;
7978     }
7979 
7980     return timerid;
7981 }
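/*
 * Sketch of the timer ID round trip (illustrative values only): the IDs
 * handed to the guest are assumed to be built as TIMER_MAGIC | index
 * elsewhere in this file, so:
 *
 *   get_timer_id(0x0caf0002) -> 2
 *   get_timer_id(0x12340002) -> -TARGET_EINVAL   (magic mismatch)
 *   get_timer_id(0x0cafffff) -> -TARGET_EINVAL   when the low 16 bits are
 *                               >= ARRAY_SIZE(g_posix_timers)
 */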
7982 
7983 static abi_long swap_data_eventfd(void *buf, size_t len)
7984 {
7985     uint64_t *counter = buf;
7986     int i;
7987 
7988     if (len < sizeof(uint64_t)) {
7989         return -EINVAL;
7990     }
7991 
7992     for (i = 0; i < len; i += sizeof(uint64_t)) {
7993         *counter = tswap64(*counter);
7994         counter++;
7995     }
7996 
7997     return len;
7998 }
7999 
8000 static TargetFdTrans target_eventfd_trans = {
8001     .host_to_target_data = swap_data_eventfd,
8002     .target_to_host_data = swap_data_eventfd,
8003 };
8004 
8005 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
8006     (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
8007      defined(__NR_inotify_init1))
8008 static abi_long host_to_target_data_inotify(void *buf, size_t len)
8009 {
8010     struct inotify_event *ev;
8011     int i;
8012     uint32_t name_len;
8013 
8014     for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
8015         ev = (struct inotify_event *)((char *)buf + i);
8016         name_len = ev->len;
8017 
8018         ev->wd = tswap32(ev->wd);
8019         ev->mask = tswap32(ev->mask);
8020         ev->cookie = tswap32(ev->cookie);
8021         ev->len = tswap32(name_len);
8022     }
8023 
8024     return len;
8025 }
8026 
8027 static TargetFdTrans target_inotify_trans = {
8028     .host_to_target_data = host_to_target_data_inotify,
8029 };
8030 #endif
8031 
8032 static int target_to_host_cpu_mask(unsigned long *host_mask,
8033                                    size_t host_size,
8034                                    abi_ulong target_addr,
8035                                    size_t target_size)
8036 {
8037     unsigned target_bits = sizeof(abi_ulong) * 8;
8038     unsigned host_bits = sizeof(*host_mask) * 8;
8039     abi_ulong *target_mask;
8040     unsigned i, j;
8041 
8042     assert(host_size >= target_size);
8043 
8044     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8045     if (!target_mask) {
8046         return -TARGET_EFAULT;
8047     }
8048     memset(host_mask, 0, host_size);
8049 
8050     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8051         unsigned bit = i * target_bits;
8052         abi_ulong val;
8053 
8054         __get_user(val, &target_mask[i]);
8055         for (j = 0; j < target_bits; j++, bit++) {
8056             if (val & (1UL << j)) {
8057                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8058             }
8059         }
8060     }
8061 
8062     unlock_user(target_mask, target_addr, 0);
8063     return 0;
8064 }
8065 
8066 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8067                                    size_t host_size,
8068                                    abi_ulong target_addr,
8069                                    size_t target_size)
8070 {
8071     unsigned target_bits = sizeof(abi_ulong) * 8;
8072     unsigned host_bits = sizeof(*host_mask) * 8;
8073     abi_ulong *target_mask;
8074     unsigned i, j;
8075 
8076     assert(host_size >= target_size);
8077 
8078     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8079     if (!target_mask) {
8080         return -TARGET_EFAULT;
8081     }
8082 
8083     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8084         unsigned bit = i * target_bits;
8085         abi_ulong val = 0;
8086 
8087         for (j = 0; j < target_bits; j++, bit++) {
8088             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8089                 val |= 1UL << j;
8090             }
8091         }
8092         __put_user(val, &target_mask[i]);
8093     }
8094 
8095     unlock_user(target_mask, target_addr, target_size);
8096     return 0;
8097 }
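/*
 * Worked example for the two CPU-mask helpers above (illustration only):
 * with a 32-bit abi_ulong target running on a 64-bit host, target_bits == 32
 * and host_bits == 64, so two consecutive target words fill one host word:
 *
 *   target_mask[0] = 0x00000001   (CPU 0 set)
 *   target_mask[1] = 0x80000000   (CPU 63 set)
 *       -> host_mask[0] = 0x8000000000000001
 *
 * host_to_target_cpu_mask() performs the inverse split, preserving the
 * overall bit order.
 */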
8098 
8099 /* do_syscall() should always have a single exit point at the end so
8100    that actions, such as logging of syscall results, can be performed.
8101    All errnos that do_syscall() returns must be -TARGET_<errcode>. */
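/* Concrete illustration of the rule above (sketch only): if a host openat()
   fails with ENOENT, the guest must see -TARGET_ENOENT rather than the
   host's negated errno, because errno values can differ between host and
   target architectures. */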
8102 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
8103                     abi_long arg2, abi_long arg3, abi_long arg4,
8104                     abi_long arg5, abi_long arg6, abi_long arg7,
8105                     abi_long arg8)
8106 {
8107     CPUState *cpu = ENV_GET_CPU(cpu_env);
8108     abi_long ret;
8109 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8110     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8111     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
8112     struct stat st;
8113 #endif
8114 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8115     || defined(TARGET_NR_fstatfs)
8116     struct statfs stfs;
8117 #endif
8118     void *p;
8119 
8120 #if defined(DEBUG_ERESTARTSYS)
8121     /* Debug-only code for exercising the syscall-restart code paths
8122      * in the per-architecture cpu main loops: restart every syscall
8123      * the guest makes once before letting it through.
8124      */
8125     {
8126         static int flag;
8127 
8128         flag = !flag;
8129         if (flag) {
8130             return -TARGET_ERESTARTSYS;
8131         }
8132     }
8133 #endif
8134 
8135 #ifdef DEBUG
8136     gemu_log("syscall %d", num);
8137 #endif
8138     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
8139     if (do_strace)
8140         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
8141 
8142     switch (num) {
8143     case TARGET_NR_exit:
8144         /* In old applications this may be used to implement _exit(2).
8145            However, in threaded applications it is used for thread termination,
8146            and _exit_group is used for application termination.
8147            Do thread termination if we have more than one thread.  */
8148 
8149         if (block_signals()) {
8150             ret = -TARGET_ERESTARTSYS;
8151             break;
8152         }
8153 
8154         cpu_list_lock();
8155 
8156         if (CPU_NEXT(first_cpu)) {
8157             TaskState *ts;
8158 
8159             /* Remove the CPU from the list.  */
8160             QTAILQ_REMOVE(&cpus, cpu, node);
8161 
8162             cpu_list_unlock();
8163 
8164             ts = cpu->opaque;
8165             if (ts->child_tidptr) {
8166                 put_user_u32(0, ts->child_tidptr);
8167                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8168                           NULL, NULL, 0);
8169             }
8170             thread_cpu = NULL;
8171             object_unref(OBJECT(cpu));
8172             g_free(ts);
8173             rcu_unregister_thread();
8174             pthread_exit(NULL);
8175         }
8176 
8177         cpu_list_unlock();
8178         preexit_cleanup(cpu_env, arg1);
8179         _exit(arg1);
8180         ret = 0; /* avoid warning */
8181         break;
8182     case TARGET_NR_read:
8183         if (arg3 == 0)
8184             ret = 0;
8185         else {
8186             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8187                 goto efault;
8188             ret = get_errno(safe_read(arg1, p, arg3));
8189             if (ret >= 0 &&
8190                 fd_trans_host_to_target_data(arg1)) {
8191                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8192             }
8193             unlock_user(p, arg2, ret);
8194         }
8195         break;
8196     case TARGET_NR_write:
8197         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8198             goto efault;
8199         if (fd_trans_target_to_host_data(arg1)) {
8200             void *copy = g_malloc(arg3);
8201             memcpy(copy, p, arg3);
8202             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8203             if (ret >= 0) {
8204                 ret = get_errno(safe_write(arg1, copy, ret));
8205             }
8206             g_free(copy);
8207         } else {
8208             ret = get_errno(safe_write(arg1, p, arg3));
8209         }
8210         unlock_user(p, arg2, 0);
8211         break;
8212 #ifdef TARGET_NR_open
8213     case TARGET_NR_open:
8214         if (!(p = lock_user_string(arg1)))
8215             goto efault;
8216         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8217                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8218                                   arg3));
8219         fd_trans_unregister(ret);
8220         unlock_user(p, arg1, 0);
8221         break;
8222 #endif
8223     case TARGET_NR_openat:
8224         if (!(p = lock_user_string(arg2)))
8225             goto efault;
8226         ret = get_errno(do_openat(cpu_env, arg1, p,
8227                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8228                                   arg4));
8229         fd_trans_unregister(ret);
8230         unlock_user(p, arg2, 0);
8231         break;
8232 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8233     case TARGET_NR_name_to_handle_at:
8234         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8235         break;
8236 #endif
8237 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8238     case TARGET_NR_open_by_handle_at:
8239         ret = do_open_by_handle_at(arg1, arg2, arg3);
8240         fd_trans_unregister(ret);
8241         break;
8242 #endif
8243     case TARGET_NR_close:
8244         fd_trans_unregister(arg1);
8245         ret = get_errno(close(arg1));
8246         break;
8247     case TARGET_NR_brk:
8248         ret = do_brk(arg1);
8249         break;
8250 #ifdef TARGET_NR_fork
8251     case TARGET_NR_fork:
8252         ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8253         break;
8254 #endif
8255 #ifdef TARGET_NR_waitpid
8256     case TARGET_NR_waitpid:
8257         {
8258             int status;
8259             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8260             if (!is_error(ret) && arg2 && ret
8261                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8262                 goto efault;
8263         }
8264         break;
8265 #endif
8266 #ifdef TARGET_NR_waitid
8267     case TARGET_NR_waitid:
8268         {
8269             siginfo_t info;
8270             info.si_pid = 0;
8271             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8272             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8273                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8274                     goto efault;
8275                 host_to_target_siginfo(p, &info);
8276                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8277             }
8278         }
8279         break;
8280 #endif
8281 #ifdef TARGET_NR_creat /* not on alpha */
8282     case TARGET_NR_creat:
8283         if (!(p = lock_user_string(arg1)))
8284             goto efault;
8285         ret = get_errno(creat(p, arg2));
8286         fd_trans_unregister(ret);
8287         unlock_user(p, arg1, 0);
8288         break;
8289 #endif
8290 #ifdef TARGET_NR_link
8291     case TARGET_NR_link:
8292         {
8293             void * p2;
8294             p = lock_user_string(arg1);
8295             p2 = lock_user_string(arg2);
8296             if (!p || !p2)
8297                 ret = -TARGET_EFAULT;
8298             else
8299                 ret = get_errno(link(p, p2));
8300             unlock_user(p2, arg2, 0);
8301             unlock_user(p, arg1, 0);
8302         }
8303         break;
8304 #endif
8305 #if defined(TARGET_NR_linkat)
8306     case TARGET_NR_linkat:
8307         {
8308             void * p2 = NULL;
8309             if (!arg2 || !arg4)
8310                 goto efault;
8311             p  = lock_user_string(arg2);
8312             p2 = lock_user_string(arg4);
8313             if (!p || !p2)
8314                 ret = -TARGET_EFAULT;
8315             else
8316                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8317             unlock_user(p, arg2, 0);
8318             unlock_user(p2, arg4, 0);
8319         }
8320         break;
8321 #endif
8322 #ifdef TARGET_NR_unlink
8323     case TARGET_NR_unlink:
8324         if (!(p = lock_user_string(arg1)))
8325             goto efault;
8326         ret = get_errno(unlink(p));
8327         unlock_user(p, arg1, 0);
8328         break;
8329 #endif
8330 #if defined(TARGET_NR_unlinkat)
8331     case TARGET_NR_unlinkat:
8332         if (!(p = lock_user_string(arg2)))
8333             goto efault;
8334         ret = get_errno(unlinkat(arg1, p, arg3));
8335         unlock_user(p, arg2, 0);
8336         break;
8337 #endif
8338     case TARGET_NR_execve:
8339         {
8340             char **argp, **envp;
8341             int argc, envc;
8342             abi_ulong gp;
8343             abi_ulong guest_argp;
8344             abi_ulong guest_envp;
8345             abi_ulong addr;
8346             char **q;
8347             int total_size = 0;
8348 
8349             argc = 0;
8350             guest_argp = arg2;
8351             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8352                 if (get_user_ual(addr, gp))
8353                     goto efault;
8354                 if (!addr)
8355                     break;
8356                 argc++;
8357             }
8358             envc = 0;
8359             guest_envp = arg3;
8360             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8361                 if (get_user_ual(addr, gp))
8362                     goto efault;
8363                 if (!addr)
8364                     break;
8365                 envc++;
8366             }
8367 
8368             argp = g_new0(char *, argc + 1);
8369             envp = g_new0(char *, envc + 1);
8370 
8371             for (gp = guest_argp, q = argp; gp;
8372                   gp += sizeof(abi_ulong), q++) {
8373                 if (get_user_ual(addr, gp))
8374                     goto execve_efault;
8375                 if (!addr)
8376                     break;
8377                 if (!(*q = lock_user_string(addr)))
8378                     goto execve_efault;
8379                 total_size += strlen(*q) + 1;
8380             }
8381             *q = NULL;
8382 
8383             for (gp = guest_envp, q = envp; gp;
8384                   gp += sizeof(abi_ulong), q++) {
8385                 if (get_user_ual(addr, gp))
8386                     goto execve_efault;
8387                 if (!addr)
8388                     break;
8389                 if (!(*q = lock_user_string(addr)))
8390                     goto execve_efault;
8391                 total_size += strlen(*q) + 1;
8392             }
8393             *q = NULL;
8394 
8395             if (!(p = lock_user_string(arg1)))
8396                 goto execve_efault;
8397             /* Although execve() is not an interruptible syscall it is
8398              * a special case where we must use the safe_syscall wrapper:
8399              * if we allow a signal to happen before we make the host
8400              * syscall then we will 'lose' it, because at the point of
8401              * execve the process leaves QEMU's control. So we use the
8402              * safe syscall wrapper to ensure that we either take the
8403              * signal as a guest signal, or else it does not happen
8404              * before the execve completes and makes it the other
8405              * program's problem.
8406              */
8407             ret = get_errno(safe_execve(p, argp, envp));
8408             unlock_user(p, arg1, 0);
8409 
8410             goto execve_end;
8411 
8412         execve_efault:
8413             ret = -TARGET_EFAULT;
8414 
8415         execve_end:
8416             for (gp = guest_argp, q = argp; *q;
8417                   gp += sizeof(abi_ulong), q++) {
8418                 if (get_user_ual(addr, gp)
8419                     || !addr)
8420                     break;
8421                 unlock_user(*q, addr, 0);
8422             }
8423             for (gp = guest_envp, q = envp; *q;
8424                   gp += sizeof(abi_ulong), q++) {
8425                 if (get_user_ual(addr, gp)
8426                     || !addr)
8427                     break;
8428                 unlock_user(*q, addr, 0);
8429             }
8430 
8431             g_free(argp);
8432             g_free(envp);
8433         }
8434         break;
8435     case TARGET_NR_chdir:
8436         if (!(p = lock_user_string(arg1)))
8437             goto efault;
8438         ret = get_errno(chdir(p));
8439         unlock_user(p, arg1, 0);
8440         break;
8441 #ifdef TARGET_NR_time
8442     case TARGET_NR_time:
8443         {
8444             time_t host_time;
8445             ret = get_errno(time(&host_time));
8446             if (!is_error(ret)
8447                 && arg1
8448                 && put_user_sal(host_time, arg1))
8449                 goto efault;
8450         }
8451         break;
8452 #endif
8453 #ifdef TARGET_NR_mknod
8454     case TARGET_NR_mknod:
8455         if (!(p = lock_user_string(arg1)))
8456             goto efault;
8457         ret = get_errno(mknod(p, arg2, arg3));
8458         unlock_user(p, arg1, 0);
8459         break;
8460 #endif
8461 #if defined(TARGET_NR_mknodat)
8462     case TARGET_NR_mknodat:
8463         if (!(p = lock_user_string(arg2)))
8464             goto efault;
8465         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8466         unlock_user(p, arg2, 0);
8467         break;
8468 #endif
8469 #ifdef TARGET_NR_chmod
8470     case TARGET_NR_chmod:
8471         if (!(p = lock_user_string(arg1)))
8472             goto efault;
8473         ret = get_errno(chmod(p, arg2));
8474         unlock_user(p, arg1, 0);
8475         break;
8476 #endif
8477 #ifdef TARGET_NR_break
8478     case TARGET_NR_break:
8479         goto unimplemented;
8480 #endif
8481 #ifdef TARGET_NR_oldstat
8482     case TARGET_NR_oldstat:
8483         goto unimplemented;
8484 #endif
8485 #ifdef TARGET_NR_lseek
8486     case TARGET_NR_lseek:
8487         ret = get_errno(lseek(arg1, arg2, arg3));
8488         break;
8489 #endif
8490 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8491     /* Alpha specific */
8492     case TARGET_NR_getxpid:
8493         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8494         ret = get_errno(getpid());
8495         break;
8496 #endif
8497 #ifdef TARGET_NR_getpid
8498     case TARGET_NR_getpid:
8499         ret = get_errno(getpid());
8500         break;
8501 #endif
8502     case TARGET_NR_mount:
8503         {
8504             /* need to look at the data field */
8505             void *p2, *p3;
8506 
8507             if (arg1) {
8508                 p = lock_user_string(arg1);
8509                 if (!p) {
8510                     goto efault;
8511                 }
8512             } else {
8513                 p = NULL;
8514             }
8515 
8516             p2 = lock_user_string(arg2);
8517             if (!p2) {
8518                 if (arg1) {
8519                     unlock_user(p, arg1, 0);
8520                 }
8521                 goto efault;
8522             }
8523 
8524             if (arg3) {
8525                 p3 = lock_user_string(arg3);
8526                 if (!p3) {
8527                     if (arg1) {
8528                         unlock_user(p, arg1, 0);
8529                     }
8530                     unlock_user(p2, arg2, 0);
8531                     goto efault;
8532                 }
8533             } else {
8534                 p3 = NULL;
8535             }
8536 
8537             /* FIXME - arg5 should be locked, but it isn't clear how to
8538              * do that since it's not guaranteed to be a NULL-terminated
8539              * string.
8540              */
8541             if (!arg5) {
8542                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8543             } else {
8544                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8545             }
8546             ret = get_errno(ret);
8547 
8548             if (arg1) {
8549                 unlock_user(p, arg1, 0);
8550             }
8551             unlock_user(p2, arg2, 0);
8552             if (arg3) {
8553                 unlock_user(p3, arg3, 0);
8554             }
8555         }
8556         break;
8557 #ifdef TARGET_NR_umount
8558     case TARGET_NR_umount:
8559         if (!(p = lock_user_string(arg1)))
8560             goto efault;
8561         ret = get_errno(umount(p));
8562         unlock_user(p, arg1, 0);
8563         break;
8564 #endif
8565 #ifdef TARGET_NR_stime /* not on alpha */
8566     case TARGET_NR_stime:
8567         {
8568             time_t host_time;
8569             if (get_user_sal(host_time, arg1))
8570                 goto efault;
8571             ret = get_errno(stime(&host_time));
8572         }
8573         break;
8574 #endif
8575     case TARGET_NR_ptrace:
8576         goto unimplemented;
8577 #ifdef TARGET_NR_alarm /* not on alpha */
8578     case TARGET_NR_alarm:
8579         ret = alarm(arg1);
8580         break;
8581 #endif
8582 #ifdef TARGET_NR_oldfstat
8583     case TARGET_NR_oldfstat:
8584         goto unimplemented;
8585 #endif
8586 #ifdef TARGET_NR_pause /* not on alpha */
8587     case TARGET_NR_pause:
8588         if (!block_signals()) {
8589             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8590         }
8591         ret = -TARGET_EINTR;
8592         break;
8593 #endif
8594 #ifdef TARGET_NR_utime
8595     case TARGET_NR_utime:
8596         {
8597             struct utimbuf tbuf, *host_tbuf;
8598             struct target_utimbuf *target_tbuf;
8599             if (arg2) {
8600                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8601                     goto efault;
8602                 tbuf.actime = tswapal(target_tbuf->actime);
8603                 tbuf.modtime = tswapal(target_tbuf->modtime);
8604                 unlock_user_struct(target_tbuf, arg2, 0);
8605                 host_tbuf = &tbuf;
8606             } else {
8607                 host_tbuf = NULL;
8608             }
8609             if (!(p = lock_user_string(arg1)))
8610                 goto efault;
8611             ret = get_errno(utime(p, host_tbuf));
8612             unlock_user(p, arg1, 0);
8613         }
8614         break;
8615 #endif
8616 #ifdef TARGET_NR_utimes
8617     case TARGET_NR_utimes:
8618         {
8619             struct timeval *tvp, tv[2];
8620             if (arg2) {
8621                 if (copy_from_user_timeval(&tv[0], arg2)
8622                     || copy_from_user_timeval(&tv[1],
8623                                               arg2 + sizeof(struct target_timeval)))
8624                     goto efault;
8625                 tvp = tv;
8626             } else {
8627                 tvp = NULL;
8628             }
8629             if (!(p = lock_user_string(arg1)))
8630                 goto efault;
8631             ret = get_errno(utimes(p, tvp));
8632             unlock_user(p, arg1, 0);
8633         }
8634         break;
8635 #endif
8636 #if defined(TARGET_NR_futimesat)
8637     case TARGET_NR_futimesat:
8638         {
8639             struct timeval *tvp, tv[2];
8640             if (arg3) {
8641                 if (copy_from_user_timeval(&tv[0], arg3)
8642                     || copy_from_user_timeval(&tv[1],
8643                                               arg3 + sizeof(struct target_timeval)))
8644                     goto efault;
8645                 tvp = tv;
8646             } else {
8647                 tvp = NULL;
8648             }
8649             if (!(p = lock_user_string(arg2)))
8650                 goto efault;
8651             ret = get_errno(futimesat(arg1, path(p), tvp));
8652             unlock_user(p, arg2, 0);
8653         }
8654         break;
8655 #endif
8656 #ifdef TARGET_NR_stty
8657     case TARGET_NR_stty:
8658         goto unimplemented;
8659 #endif
8660 #ifdef TARGET_NR_gtty
8661     case TARGET_NR_gtty:
8662         goto unimplemented;
8663 #endif
8664 #ifdef TARGET_NR_access
8665     case TARGET_NR_access:
8666         if (!(p = lock_user_string(arg1)))
8667             goto efault;
8668         ret = get_errno(access(path(p), arg2));
8669         unlock_user(p, arg1, 0);
8670         break;
8671 #endif
8672 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8673     case TARGET_NR_faccessat:
8674         if (!(p = lock_user_string(arg2)))
8675             goto efault;
8676         ret = get_errno(faccessat(arg1, p, arg3, 0));
8677         unlock_user(p, arg2, 0);
8678         break;
8679 #endif
8680 #ifdef TARGET_NR_nice /* not on alpha */
8681     case TARGET_NR_nice:
8682         ret = get_errno(nice(arg1));
8683         break;
8684 #endif
8685 #ifdef TARGET_NR_ftime
8686     case TARGET_NR_ftime:
8687         goto unimplemented;
8688 #endif
8689     case TARGET_NR_sync:
8690         sync();
8691         ret = 0;
8692         break;
8693 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8694     case TARGET_NR_syncfs:
8695         ret = get_errno(syncfs(arg1));
8696         break;
8697 #endif
8698     case TARGET_NR_kill:
8699         ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8700         break;
8701 #ifdef TARGET_NR_rename
8702     case TARGET_NR_rename:
8703         {
8704             void *p2;
8705             p = lock_user_string(arg1);
8706             p2 = lock_user_string(arg2);
8707             if (!p || !p2)
8708                 ret = -TARGET_EFAULT;
8709             else
8710                 ret = get_errno(rename(p, p2));
8711             unlock_user(p2, arg2, 0);
8712             unlock_user(p, arg1, 0);
8713         }
8714         break;
8715 #endif
8716 #if defined(TARGET_NR_renameat)
8717     case TARGET_NR_renameat:
8718         {
8719             void *p2;
8720             p  = lock_user_string(arg2);
8721             p2 = lock_user_string(arg4);
8722             if (!p || !p2)
8723                 ret = -TARGET_EFAULT;
8724             else
8725                 ret = get_errno(renameat(arg1, p, arg3, p2));
8726             unlock_user(p2, arg4, 0);
8727             unlock_user(p, arg2, 0);
8728         }
8729         break;
8730 #endif
8731 #if defined(TARGET_NR_renameat2)
8732     case TARGET_NR_renameat2:
8733         {
8734             void *p2;
8735             p  = lock_user_string(arg2);
8736             p2 = lock_user_string(arg4);
8737             if (!p || !p2) {
8738                 ret = -TARGET_EFAULT;
8739             } else {
8740                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8741             }
8742             unlock_user(p2, arg4, 0);
8743             unlock_user(p, arg2, 0);
8744         }
8745         break;
8746 #endif
8747 #ifdef TARGET_NR_mkdir
8748     case TARGET_NR_mkdir:
8749         if (!(p = lock_user_string(arg1)))
8750             goto efault;
8751         ret = get_errno(mkdir(p, arg2));
8752         unlock_user(p, arg1, 0);
8753         break;
8754 #endif
8755 #if defined(TARGET_NR_mkdirat)
8756     case TARGET_NR_mkdirat:
8757         if (!(p = lock_user_string(arg2)))
8758             goto efault;
8759         ret = get_errno(mkdirat(arg1, p, arg3));
8760         unlock_user(p, arg2, 0);
8761         break;
8762 #endif
8763 #ifdef TARGET_NR_rmdir
8764     case TARGET_NR_rmdir:
8765         if (!(p = lock_user_string(arg1)))
8766             goto efault;
8767         ret = get_errno(rmdir(p));
8768         unlock_user(p, arg1, 0);
8769         break;
8770 #endif
8771     case TARGET_NR_dup:
8772         ret = get_errno(dup(arg1));
8773         if (ret >= 0) {
8774             fd_trans_dup(arg1, ret);
8775         }
8776         break;
8777 #ifdef TARGET_NR_pipe
8778     case TARGET_NR_pipe:
8779         ret = do_pipe(cpu_env, arg1, 0, 0);
8780         break;
8781 #endif
8782 #ifdef TARGET_NR_pipe2
8783     case TARGET_NR_pipe2:
8784         ret = do_pipe(cpu_env, arg1,
8785                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8786         break;
8787 #endif
8788     case TARGET_NR_times:
8789         {
8790             struct target_tms *tmsp;
8791             struct tms tms;
8792             ret = get_errno(times(&tms));
8793             if (arg1) {
8794                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8795                 if (!tmsp)
8796                     goto efault;
8797                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8798                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8799                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8800                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8801             }
8802             if (!is_error(ret))
8803                 ret = host_to_target_clock_t(ret);
8804         }
8805         break;
8806 #ifdef TARGET_NR_prof
8807     case TARGET_NR_prof:
8808         goto unimplemented;
8809 #endif
8810 #ifdef TARGET_NR_signal
8811     case TARGET_NR_signal:
8812         goto unimplemented;
8813 #endif
8814     case TARGET_NR_acct:
8815         if (arg1 == 0) {
8816             ret = get_errno(acct(NULL));
8817         } else {
8818             if (!(p = lock_user_string(arg1)))
8819                 goto efault;
8820             ret = get_errno(acct(path(p)));
8821             unlock_user(p, arg1, 0);
8822         }
8823         break;
8824 #ifdef TARGET_NR_umount2
8825     case TARGET_NR_umount2:
8826         if (!(p = lock_user_string(arg1)))
8827             goto efault;
8828         ret = get_errno(umount2(p, arg2));
8829         unlock_user(p, arg1, 0);
8830         break;
8831 #endif
8832 #ifdef TARGET_NR_lock
8833     case TARGET_NR_lock:
8834         goto unimplemented;
8835 #endif
8836     case TARGET_NR_ioctl:
8837         ret = do_ioctl(arg1, arg2, arg3);
8838         break;
8839 #ifdef TARGET_NR_fcntl
8840     case TARGET_NR_fcntl:
8841         ret = do_fcntl(arg1, arg2, arg3);
8842         break;
8843 #endif
8844 #ifdef TARGET_NR_mpx
8845     case TARGET_NR_mpx:
8846         goto unimplemented;
8847 #endif
8848     case TARGET_NR_setpgid:
8849         ret = get_errno(setpgid(arg1, arg2));
8850         break;
8851 #ifdef TARGET_NR_ulimit
8852     case TARGET_NR_ulimit:
8853         goto unimplemented;
8854 #endif
8855 #ifdef TARGET_NR_oldolduname
8856     case TARGET_NR_oldolduname:
8857         goto unimplemented;
8858 #endif
8859     case TARGET_NR_umask:
8860         ret = get_errno(umask(arg1));
8861         break;
8862     case TARGET_NR_chroot:
8863         if (!(p = lock_user_string(arg1)))
8864             goto efault;
8865         ret = get_errno(chroot(p));
8866         unlock_user(p, arg1, 0);
8867         break;
8868 #ifdef TARGET_NR_ustat
8869     case TARGET_NR_ustat:
8870         goto unimplemented;
8871 #endif
8872 #ifdef TARGET_NR_dup2
8873     case TARGET_NR_dup2:
8874         ret = get_errno(dup2(arg1, arg2));
8875         if (ret >= 0) {
8876             fd_trans_dup(arg1, arg2);
8877         }
8878         break;
8879 #endif
8880 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8881     case TARGET_NR_dup3:
8882     {
8883         int host_flags;
8884 
8885         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8886             return -TARGET_EINVAL;
8887         }
8888         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8889         ret = get_errno(dup3(arg1, arg2, host_flags));
8890         if (ret >= 0) {
8891             fd_trans_dup(arg1, arg2);
8892         }
8893         break;
8894     }
8895 #endif
8896 #ifdef TARGET_NR_getppid /* not on alpha */
8897     case TARGET_NR_getppid:
8898         ret = get_errno(getppid());
8899         break;
8900 #endif
8901 #ifdef TARGET_NR_getpgrp
8902     case TARGET_NR_getpgrp:
8903         ret = get_errno(getpgrp());
8904         break;
8905 #endif
8906     case TARGET_NR_setsid:
8907         ret = get_errno(setsid());
8908         break;
8909 #ifdef TARGET_NR_sigaction
8910     case TARGET_NR_sigaction:
8911         {
8912 #if defined(TARGET_ALPHA)
8913             struct target_sigaction act, oact, *pact = 0;
8914             struct target_old_sigaction *old_act;
8915             if (arg2) {
8916                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8917                     goto efault;
8918                 act._sa_handler = old_act->_sa_handler;
8919                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8920                 act.sa_flags = old_act->sa_flags;
8921                 act.sa_restorer = 0;
8922                 unlock_user_struct(old_act, arg2, 0);
8923                 pact = &act;
8924             }
8925             ret = get_errno(do_sigaction(arg1, pact, &oact));
8926             if (!is_error(ret) && arg3) {
8927                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8928                     goto efault;
8929                 old_act->_sa_handler = oact._sa_handler;
8930                 old_act->sa_mask = oact.sa_mask.sig[0];
8931                 old_act->sa_flags = oact.sa_flags;
8932                 unlock_user_struct(old_act, arg3, 1);
8933             }
8934 #elif defined(TARGET_MIPS)
8935             struct target_sigaction act, oact, *pact, *old_act;
8936 
8937             if (arg2) {
8938                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8939                     goto efault;
8940                 act._sa_handler = old_act->_sa_handler;
8941                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8942                 act.sa_flags = old_act->sa_flags;
8943                 unlock_user_struct(old_act, arg2, 0);
8944                 pact = &act;
8945             } else {
8946                 pact = NULL;
8947             }
8948 
8949             ret = get_errno(do_sigaction(arg1, pact, &oact));
8950 
8951             if (!is_error(ret) && arg3) {
8952                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8953                     goto efault;
8954                 old_act->_sa_handler = oact._sa_handler;
8955                 old_act->sa_flags = oact.sa_flags;
8956                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8957                 old_act->sa_mask.sig[1] = 0;
8958                 old_act->sa_mask.sig[2] = 0;
8959                 old_act->sa_mask.sig[3] = 0;
8960                 unlock_user_struct(old_act, arg3, 1);
8961             }
8962 #else
8963             struct target_old_sigaction *old_act;
8964             struct target_sigaction act, oact, *pact;
8965             if (arg2) {
8966                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8967                     goto efault;
8968                 act._sa_handler = old_act->_sa_handler;
8969                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8970                 act.sa_flags = old_act->sa_flags;
8971                 act.sa_restorer = old_act->sa_restorer;
8972 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8973                 act.ka_restorer = 0;
8974 #endif
8975                 unlock_user_struct(old_act, arg2, 0);
8976                 pact = &act;
8977             } else {
8978                 pact = NULL;
8979             }
8980             ret = get_errno(do_sigaction(arg1, pact, &oact));
8981             if (!is_error(ret) && arg3) {
8982                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8983                     goto efault;
8984                 old_act->_sa_handler = oact._sa_handler;
8985                 old_act->sa_mask = oact.sa_mask.sig[0];
8986                 old_act->sa_flags = oact.sa_flags;
8987                 old_act->sa_restorer = oact.sa_restorer;
8988                 unlock_user_struct(old_act, arg3, 1);
8989             }
8990 #endif
8991         }
8992         break;
8993 #endif
8994     case TARGET_NR_rt_sigaction:
8995         {
8996 #if defined(TARGET_ALPHA)
8997             /* For Alpha and SPARC this is a 5 argument syscall, with
8998              * a 'restorer' parameter which must be copied into the
8999              * sa_restorer field of the sigaction struct.
9000              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9001              * and arg5 is the sigsetsize.
9002              * Alpha also has a separate rt_sigaction struct that it uses
9003              * here; SPARC uses the usual sigaction struct.
9004              */
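            /*
             * Argument layout being described, as a sketch:
             *
             *            arg1    arg2  arg3  arg4        arg5
             *   Alpha:   signum  act   oact  sigsetsize  restorer
             *   SPARC:   signum  act   oact  restorer    sigsetsize
             *   others:  signum  act   oact  sigsetsize  (unused)
             */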
9005             struct target_rt_sigaction *rt_act;
9006             struct target_sigaction act, oact, *pact = 0;
9007 
9008             if (arg4 != sizeof(target_sigset_t)) {
9009                 ret = -TARGET_EINVAL;
9010                 break;
9011             }
9012             if (arg2) {
9013                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9014                     goto efault;
9015                 act._sa_handler = rt_act->_sa_handler;
9016                 act.sa_mask = rt_act->sa_mask;
9017                 act.sa_flags = rt_act->sa_flags;
9018                 act.sa_restorer = arg5;
9019                 unlock_user_struct(rt_act, arg2, 0);
9020                 pact = &act;
9021             }
9022             ret = get_errno(do_sigaction(arg1, pact, &oact));
9023             if (!is_error(ret) && arg3) {
9024                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9025                     goto efault;
9026                 rt_act->_sa_handler = oact._sa_handler;
9027                 rt_act->sa_mask = oact.sa_mask;
9028                 rt_act->sa_flags = oact.sa_flags;
9029                 unlock_user_struct(rt_act, arg3, 1);
9030             }
9031 #else
9032 #ifdef TARGET_SPARC
9033             target_ulong restorer = arg4;
9034             target_ulong sigsetsize = arg5;
9035 #else
9036             target_ulong sigsetsize = arg4;
9037 #endif
9038             struct target_sigaction *act;
9039             struct target_sigaction *oact;
9040 
9041             if (sigsetsize != sizeof(target_sigset_t)) {
9042                 ret = -TARGET_EINVAL;
9043                 break;
9044             }
9045             if (arg2) {
9046                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9047                     goto efault;
9048                 }
9049 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9050                 act->ka_restorer = restorer;
9051 #endif
9052             } else {
9053                 act = NULL;
9054             }
9055             if (arg3) {
9056                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9057                     ret = -TARGET_EFAULT;
9058                     goto rt_sigaction_fail;
9059                 }
9060             } else
9061                 oact = NULL;
9062             ret = get_errno(do_sigaction(arg1, act, oact));
9063         rt_sigaction_fail:
9064             if (act)
9065                 unlock_user_struct(act, arg2, 0);
9066             if (oact)
9067                 unlock_user_struct(oact, arg3, 1);
9068 #endif
9069         }
9070         break;
9071 #ifdef TARGET_NR_sgetmask /* not on alpha */
9072     case TARGET_NR_sgetmask:
9073         {
9074             sigset_t cur_set;
9075             abi_ulong target_set;
9076             ret = do_sigprocmask(0, NULL, &cur_set);
9077             if (!ret) {
9078                 host_to_target_old_sigset(&target_set, &cur_set);
9079                 ret = target_set;
9080             }
9081         }
9082         break;
9083 #endif
9084 #ifdef TARGET_NR_ssetmask /* not on alpha */
9085     case TARGET_NR_ssetmask:
9086         {
9087             sigset_t set, oset;
9088             abi_ulong target_set = arg1;
9089             target_to_host_old_sigset(&set, &target_set);
9090             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9091             if (!ret) {
9092                 host_to_target_old_sigset(&target_set, &oset);
9093                 ret = target_set;
9094             }
9095         }
9096         break;
9097 #endif
9098 #ifdef TARGET_NR_sigprocmask
9099     case TARGET_NR_sigprocmask:
9100         {
9101 #if defined(TARGET_ALPHA)
9102             sigset_t set, oldset;
9103             abi_ulong mask;
9104             int how;
9105 
9106             switch (arg1) {
9107             case TARGET_SIG_BLOCK:
9108                 how = SIG_BLOCK;
9109                 break;
9110             case TARGET_SIG_UNBLOCK:
9111                 how = SIG_UNBLOCK;
9112                 break;
9113             case TARGET_SIG_SETMASK:
9114                 how = SIG_SETMASK;
9115                 break;
9116             default:
9117                 ret = -TARGET_EINVAL;
9118                 goto fail;
9119             }
9120             mask = arg2;
9121             target_to_host_old_sigset(&set, &mask);
9122 
9123             ret = do_sigprocmask(how, &set, &oldset);
9124             if (!is_error(ret)) {
9125                 host_to_target_old_sigset(&mask, &oldset);
9126                 ret = mask;
9127                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9128             }
9129 #else
9130             sigset_t set, oldset, *set_ptr;
9131             int how;
9132 
9133             if (arg2) {
9134                 switch (arg1) {
9135                 case TARGET_SIG_BLOCK:
9136                     how = SIG_BLOCK;
9137                     break;
9138                 case TARGET_SIG_UNBLOCK:
9139                     how = SIG_UNBLOCK;
9140                     break;
9141                 case TARGET_SIG_SETMASK:
9142                     how = SIG_SETMASK;
9143                     break;
9144                 default:
9145                     ret = -TARGET_EINVAL;
9146                     goto fail;
9147                 }
9148                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9149                     goto efault;
9150                 target_to_host_old_sigset(&set, p);
9151                 unlock_user(p, arg2, 0);
9152                 set_ptr = &set;
9153             } else {
9154                 how = 0;
9155                 set_ptr = NULL;
9156             }
9157             ret = do_sigprocmask(how, set_ptr, &oldset);
9158             if (!is_error(ret) && arg3) {
9159                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9160                     goto efault;
9161                 host_to_target_old_sigset(p, &oldset);
9162                 unlock_user(p, arg3, sizeof(target_sigset_t));
9163             }
9164 #endif
9165         }
9166         break;
9167 #endif
9168     case TARGET_NR_rt_sigprocmask:
9169         {
9170             int how = arg1;
9171             sigset_t set, oldset, *set_ptr;
9172 
9173             if (arg4 != sizeof(target_sigset_t)) {
9174                 ret = -TARGET_EINVAL;
9175                 break;
9176             }
9177 
9178             if (arg2) {
9179                 switch(how) {
9180                 case TARGET_SIG_BLOCK:
9181                     how = SIG_BLOCK;
9182                     break;
9183                 case TARGET_SIG_UNBLOCK:
9184                     how = SIG_UNBLOCK;
9185                     break;
9186                 case TARGET_SIG_SETMASK:
9187                     how = SIG_SETMASK;
9188                     break;
9189                 default:
9190                     ret = -TARGET_EINVAL;
9191                     goto fail;
9192                 }
9193                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9194                     goto efault;
9195                 target_to_host_sigset(&set, p);
9196                 unlock_user(p, arg2, 0);
9197                 set_ptr = &set;
9198             } else {
9199                 how = 0;
9200                 set_ptr = NULL;
9201             }
9202             ret = do_sigprocmask(how, set_ptr, &oldset);
9203             if (!is_error(ret) && arg3) {
9204                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9205                     goto efault;
9206                 host_to_target_sigset(p, &oldset);
9207                 unlock_user(p, arg3, sizeof(target_sigset_t));
9208             }
9209         }
9210         break;
9211 #ifdef TARGET_NR_sigpending
9212     case TARGET_NR_sigpending:
9213         {
9214             sigset_t set;
9215             ret = get_errno(sigpending(&set));
9216             if (!is_error(ret)) {
9217                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9218                     goto efault;
9219                 host_to_target_old_sigset(p, &set);
9220                 unlock_user(p, arg1, sizeof(target_sigset_t));
9221             }
9222         }
9223         break;
9224 #endif
9225     case TARGET_NR_rt_sigpending:
9226         {
9227             sigset_t set;
9228 
9229             /* Yes, this check is >, not != like most. We follow the kernel's
9230              * logic here: the kernel implements NR_sigpending through the
9231              * same code path, and in that case the old_sigset_t it handles
9232              * is smaller in size.
9233              */
9234             if (arg2 > sizeof(target_sigset_t)) {
9235                 ret = -TARGET_EINVAL;
9236                 break;
9237             }
9238 
9239             ret = get_errno(sigpending(&set));
9240             if (!is_error(ret)) {
9241                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9242                     goto efault;
9243                 host_to_target_sigset(p, &set);
9244                 unlock_user(p, arg1, sizeof(target_sigset_t));
9245             }
9246         }
9247         break;
9248 #ifdef TARGET_NR_sigsuspend
9249     case TARGET_NR_sigsuspend:
9250         {
9251             TaskState *ts = cpu->opaque;
9252 #if defined(TARGET_ALPHA)
9253             abi_ulong mask = arg1;
9254             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9255 #else
9256             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9257                 goto efault;
9258             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9259             unlock_user(p, arg1, 0);
9260 #endif
9261             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9262                                                SIGSET_T_SIZE));
9263             if (ret != -TARGET_ERESTARTSYS) {
9264                 ts->in_sigsuspend = 1;
9265             }
9266         }
9267         break;
9268 #endif
9269     case TARGET_NR_rt_sigsuspend:
9270         {
9271             TaskState *ts = cpu->opaque;
9272 
9273             if (arg2 != sizeof(target_sigset_t)) {
9274                 ret = -TARGET_EINVAL;
9275                 break;
9276             }
9277             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9278                 goto efault;
9279             target_to_host_sigset(&ts->sigsuspend_mask, p);
9280             unlock_user(p, arg1, 0);
9281             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9282                                                SIGSET_T_SIZE));
9283             if (ret != -TARGET_ERESTARTSYS) {
9284                 ts->in_sigsuspend = 1;
9285             }
9286         }
9287         break;
9288     case TARGET_NR_rt_sigtimedwait:
9289         {
9290             sigset_t set;
9291             struct timespec uts, *puts;
9292             siginfo_t uinfo;
9293 
9294             if (arg4 != sizeof(target_sigset_t)) {
9295                 ret = -TARGET_EINVAL;
9296                 break;
9297             }
9298 
9299             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9300                 goto efault;
9301             target_to_host_sigset(&set, p);
9302             unlock_user(p, arg1, 0);
9303             if (arg3) {
9304                 puts = &uts;
9305                 target_to_host_timespec(puts, arg3);
9306             } else {
9307                 puts = NULL;
9308             }
9309             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9310                                                  SIGSET_T_SIZE));
9311             if (!is_error(ret)) {
9312                 if (arg2) {
9313                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9314                                   0);
9315                     if (!p) {
9316                         goto efault;
9317                     }
9318                     host_to_target_siginfo(p, &uinfo);
9319                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9320                 }
9321                 ret = host_to_target_signal(ret);
9322             }
9323         }
9324         break;
9325     case TARGET_NR_rt_sigqueueinfo:
9326         {
9327             siginfo_t uinfo;
9328 
9329             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9330             if (!p) {
9331                 goto efault;
9332             }
9333             target_to_host_siginfo(&uinfo, p);
9334             unlock_user(p, arg3, 0);
9335             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9336         }
9337         break;
9338     case TARGET_NR_rt_tgsigqueueinfo:
9339         {
9340             siginfo_t uinfo;
9341 
9342             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9343             if (!p) {
9344                 goto efault;
9345             }
9346             target_to_host_siginfo(&uinfo, p);
9347             unlock_user(p, arg4, 0);
9348             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9349         }
9350         break;
9351 #ifdef TARGET_NR_sigreturn
9352     case TARGET_NR_sigreturn:
9353         if (block_signals()) {
9354             ret = -TARGET_ERESTARTSYS;
9355         } else {
9356             ret = do_sigreturn(cpu_env);
9357         }
9358         break;
9359 #endif
9360     case TARGET_NR_rt_sigreturn:
9361         if (block_signals()) {
9362             ret = -TARGET_ERESTARTSYS;
9363         } else {
9364             ret = do_rt_sigreturn(cpu_env);
9365         }
9366         break;
9367     case TARGET_NR_sethostname:
9368         if (!(p = lock_user_string(arg1)))
9369             goto efault;
9370         ret = get_errno(sethostname(p, arg2));
9371         unlock_user(p, arg1, 0);
9372         break;
9373 #ifdef TARGET_NR_setrlimit
9374     case TARGET_NR_setrlimit:
9375         {
9376             int resource = target_to_host_resource(arg1);
9377             struct target_rlimit *target_rlim;
9378             struct rlimit rlim;
9379             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9380                 goto efault;
9381             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9382             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9383             unlock_user_struct(target_rlim, arg2, 0);
9384             ret = get_errno(setrlimit(resource, &rlim));
9385         }
9386         break;
9387 #endif
9388 #ifdef TARGET_NR_getrlimit
9389     case TARGET_NR_getrlimit:
9390         {
9391             int resource = target_to_host_resource(arg1);
9392             struct target_rlimit *target_rlim;
9393             struct rlimit rlim;
9394 
9395             ret = get_errno(getrlimit(resource, &rlim));
9396             if (!is_error(ret)) {
9397                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9398                     goto efault;
9399                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9400                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9401                 unlock_user_struct(target_rlim, arg2, 1);
9402             }
9403         }
9404         break;
9405 #endif
9406     case TARGET_NR_getrusage:
9407         {
9408             struct rusage rusage;
9409             ret = get_errno(getrusage(arg1, &rusage));
9410             if (!is_error(ret)) {
9411                 ret = host_to_target_rusage(arg2, &rusage);
9412             }
9413         }
9414         break;
9415     case TARGET_NR_gettimeofday:
9416         {
9417             struct timeval tv;
9418             ret = get_errno(gettimeofday(&tv, NULL));
9419             if (!is_error(ret)) {
9420                 if (copy_to_user_timeval(arg1, &tv))
9421                     goto efault;
9422             }
9423         }
9424         break;
9425     case TARGET_NR_settimeofday:
9426         {
9427             struct timeval tv, *ptv = NULL;
9428             struct timezone tz, *ptz = NULL;
9429 
9430             if (arg1) {
9431                 if (copy_from_user_timeval(&tv, arg1)) {
9432                     goto efault;
9433                 }
9434                 ptv = &tv;
9435             }
9436 
9437             if (arg2) {
9438                 if (copy_from_user_timezone(&tz, arg2)) {
9439                     goto efault;
9440                 }
9441                 ptz = &tz;
9442             }
9443 
9444             ret = get_errno(settimeofday(ptv, ptz));
9445         }
9446         break;
9447 #if defined(TARGET_NR_select)
9448     case TARGET_NR_select:
9449 #if defined(TARGET_WANT_NI_OLD_SELECT)
9450         /* Some architectures used to have old_select here
9451          * but now return ENOSYS for it.
9452          */
9453         ret = -TARGET_ENOSYS;
9454 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9455         ret = do_old_select(arg1);
9456 #else
9457         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9458 #endif
9459         break;
9460 #endif
9461 #ifdef TARGET_NR_pselect6
9462     case TARGET_NR_pselect6:
9463         {
9464             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9465             fd_set rfds, wfds, efds;
9466             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9467             struct timespec ts, *ts_ptr;
9468 
9469             /*
9470              * The 6th arg is actually two args smashed together,
9471              * so we cannot use the C library.
9472              */
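            /*
             * For illustration, a guest libc typically packs that sixth
             * argument as something like
             *     struct { const sigset_t *ss; size_t ss_len; } pack
             *         = { &mask, _NSIG / 8 };
             *     syscall(__NR_pselect6, nfds, &rfds, &wfds, &efds, &ts, &pack);
             * which is why we read two abi_ulongs from arg6 below and check
             * the size field ourselves, as the kernel does.
             */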
9473             sigset_t set;
9474             struct {
9475                 sigset_t *set;
9476                 size_t size;
9477             } sig, *sig_ptr;
9478 
9479             abi_ulong arg_sigset, arg_sigsize, *arg7;
9480             target_sigset_t *target_sigset;
9481 
9482             n = arg1;
9483             rfd_addr = arg2;
9484             wfd_addr = arg3;
9485             efd_addr = arg4;
9486             ts_addr = arg5;
9487 
9488             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9489             if (ret) {
9490                 goto fail;
9491             }
9492             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9493             if (ret) {
9494                 goto fail;
9495             }
9496             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9497             if (ret) {
9498                 goto fail;
9499             }
9500 
9501             /*
9502              * This takes a timespec, and not a timeval, so we cannot
9503              * use the do_select() helper ...
9504              */
9505             if (ts_addr) {
9506                 if (target_to_host_timespec(&ts, ts_addr)) {
9507                     goto efault;
9508                 }
9509                 ts_ptr = &ts;
9510             } else {
9511                 ts_ptr = NULL;
9512             }
9513 
9514             /* Extract the two packed args for the sigset */
9515             if (arg6) {
9516                 sig_ptr = &sig;
9517                 sig.size = SIGSET_T_SIZE;
9518 
9519                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9520                 if (!arg7) {
9521                     goto efault;
9522                 }
9523                 arg_sigset = tswapal(arg7[0]);
9524                 arg_sigsize = tswapal(arg7[1]);
9525                 unlock_user(arg7, arg6, 0);
9526 
9527                 if (arg_sigset) {
9528                     sig.set = &set;
9529                     if (arg_sigsize != sizeof(*target_sigset)) {
9530                         /* Like the kernel, we enforce correct size sigsets */
9531                         ret = -TARGET_EINVAL;
9532                         goto fail;
9533                     }
9534                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9535                                               sizeof(*target_sigset), 1);
9536                     if (!target_sigset) {
9537                         goto efault;
9538                     }
9539                     target_to_host_sigset(&set, target_sigset);
9540                     unlock_user(target_sigset, arg_sigset, 0);
9541                 } else {
9542                     sig.set = NULL;
9543                 }
9544             } else {
9545                 sig_ptr = NULL;
9546             }
9547 
9548             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9549                                           ts_ptr, sig_ptr));
9550 
9551             if (!is_error(ret)) {
9552                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9553                     goto efault;
9554                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9555                     goto efault;
9556                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9557                     goto efault;
9558 
9559                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9560                     goto efault;
9561             }
9562         }
9563         break;
9564 #endif
9565 #ifdef TARGET_NR_symlink
9566     case TARGET_NR_symlink:
9567         {
9568             void *p2;
9569             p = lock_user_string(arg1);
9570             p2 = lock_user_string(arg2);
9571             if (!p || !p2)
9572                 ret = -TARGET_EFAULT;
9573             else
9574                 ret = get_errno(symlink(p, p2));
9575             unlock_user(p2, arg2, 0);
9576             unlock_user(p, arg1, 0);
9577         }
9578         break;
9579 #endif
9580 #if defined(TARGET_NR_symlinkat)
9581     case TARGET_NR_symlinkat:
9582         {
9583             void *p2;
9584             p  = lock_user_string(arg1);
9585             p2 = lock_user_string(arg3);
9586             if (!p || !p2)
9587                 ret = -TARGET_EFAULT;
9588             else
9589                 ret = get_errno(symlinkat(p, arg2, p2));
9590             unlock_user(p2, arg3, 0);
9591             unlock_user(p, arg1, 0);
9592         }
9593         break;
9594 #endif
9595 #ifdef TARGET_NR_oldlstat
9596     case TARGET_NR_oldlstat:
9597         goto unimplemented;
9598 #endif
9599 #ifdef TARGET_NR_readlink
9600     case TARGET_NR_readlink:
9601         {
9602             void *p2;
9603             p = lock_user_string(arg1);
9604             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9605             if (!p || !p2) {
9606                 ret = -TARGET_EFAULT;
9607             } else if (!arg3) {
9608                 /* Short circuit this for the magic exe check. */
9609                 ret = -TARGET_EINVAL;
9610             } else if (is_proc_myself((const char *)p, "exe")) {
9611                 char real[PATH_MAX], *temp;
9612                 temp = realpath(exec_path, real);
9613                 /* Return value is # of bytes that we wrote to the buffer. */
9614                 if (temp == NULL) {
9615                     ret = get_errno(-1);
9616                 } else {
9617                     /* Don't worry about sign mismatch as earlier mapping
9618                      * logic would have thrown a bad address error. */
9619                     ret = MIN(strlen(real), arg3);
9620                     /* We cannot NUL terminate the string. */
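                    /* (readlink() itself behaves the same way: it copies at
                     * most bufsiz bytes and never appends a NUL, so the guest
                     * must use the returned length to terminate the buffer.)
                     */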
9621                     memcpy(p2, real, ret);
9622                 }
9623             } else {
9624                 ret = get_errno(readlink(path(p), p2, arg3));
9625             }
9626             unlock_user(p2, arg2, ret);
9627             unlock_user(p, arg1, 0);
9628         }
9629         break;
9630 #endif
9631 #if defined(TARGET_NR_readlinkat)
9632     case TARGET_NR_readlinkat:
9633         {
9634             void *p2;
9635             p  = lock_user_string(arg2);
9636             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9637             if (!p || !p2) {
9638                 ret = -TARGET_EFAULT;
9639             } else if (is_proc_myself((const char *)p, "exe")) {
9640                 char real[PATH_MAX], *temp;
9641                 temp = realpath(exec_path, real);
9642                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9643                 snprintf((char *)p2, arg4, "%s", real);
9644             } else {
9645                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9646             }
9647             unlock_user(p2, arg3, ret);
9648             unlock_user(p, arg2, 0);
9649         }
9650         break;
9651 #endif
9652 #ifdef TARGET_NR_uselib
9653     case TARGET_NR_uselib:
9654         goto unimplemented;
9655 #endif
9656 #ifdef TARGET_NR_swapon
9657     case TARGET_NR_swapon:
9658         if (!(p = lock_user_string(arg1)))
9659             goto efault;
9660         ret = get_errno(swapon(p, arg2));
9661         unlock_user(p, arg1, 0);
9662         break;
9663 #endif
9664     case TARGET_NR_reboot:
9665         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9666            /* arg4 must be ignored in all other cases */
9667            p = lock_user_string(arg4);
9668            if (!p) {
9669               goto efault;
9670            }
9671            ret = get_errno(reboot(arg1, arg2, arg3, p));
9672            unlock_user(p, arg4, 0);
9673         } else {
9674            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9675         }
9676         break;
9677 #ifdef TARGET_NR_readdir
9678     case TARGET_NR_readdir:
9679         goto unimplemented;
9680 #endif
9681 #ifdef TARGET_NR_mmap
9682     case TARGET_NR_mmap:
9683 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9684     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9685     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9686     || defined(TARGET_S390X)
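        /* These targets use the old mmap calling convention, where the guest
         * passes a pointer to six packed words, roughly:
         *     abi_ulong a[6] = { addr, len, prot, flags, fd, offset };
         *     syscall(__NR_mmap, a);
         * so we fetch and byteswap all six before calling target_mmap().
         */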
9687         {
9688             abi_ulong *v;
9689             abi_ulong v1, v2, v3, v4, v5, v6;
9690             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9691                 goto efault;
9692             v1 = tswapal(v[0]);
9693             v2 = tswapal(v[1]);
9694             v3 = tswapal(v[2]);
9695             v4 = tswapal(v[3]);
9696             v5 = tswapal(v[4]);
9697             v6 = tswapal(v[5]);
9698             unlock_user(v, arg1, 0);
9699             ret = get_errno(target_mmap(v1, v2, v3,
9700                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9701                                         v5, v6));
9702         }
9703 #else
9704         ret = get_errno(target_mmap(arg1, arg2, arg3,
9705                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9706                                     arg5,
9707                                     arg6));
9708 #endif
9709         break;
9710 #endif
9711 #ifdef TARGET_NR_mmap2
9712     case TARGET_NR_mmap2:
9713 #ifndef MMAP_SHIFT
9714 #define MMAP_SHIFT 12
9715 #endif
9716         ret = get_errno(target_mmap(arg1, arg2, arg3,
9717                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9718                                     arg5,
9719                                     arg6 << MMAP_SHIFT));
9720         break;
9721 #endif
9722     case TARGET_NR_munmap:
9723         ret = get_errno(target_munmap(arg1, arg2));
9724         break;
9725     case TARGET_NR_mprotect:
9726         {
9727             TaskState *ts = cpu->opaque;
9728             /* Special hack to detect libc making the stack executable.  */
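            /* (Typically glibc triggers this when it needs an executable
             * stack: it mprotect()s a page near the stack pointer with
             * PROT_GROWSDOWN set and expects the kernel to widen the change
             * to the whole stack mapping; we approximate that by extending
             * the range down to stack_limit ourselves.)
             */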
9729             if ((arg3 & PROT_GROWSDOWN)
9730                 && arg1 >= ts->info->stack_limit
9731                 && arg1 <= ts->info->start_stack) {
9732                 arg3 &= ~PROT_GROWSDOWN;
9733                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9734                 arg1 = ts->info->stack_limit;
9735             }
9736         }
9737         ret = get_errno(target_mprotect(arg1, arg2, arg3));
9738         break;
9739 #ifdef TARGET_NR_mremap
9740     case TARGET_NR_mremap:
9741         ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9742         break;
9743 #endif
9744         /* ??? msync/mlock/munlock are broken for softmmu.  */
9745 #ifdef TARGET_NR_msync
9746     case TARGET_NR_msync:
9747         ret = get_errno(msync(g2h(arg1), arg2, arg3));
9748         break;
9749 #endif
9750 #ifdef TARGET_NR_mlock
9751     case TARGET_NR_mlock:
9752         ret = get_errno(mlock(g2h(arg1), arg2));
9753         break;
9754 #endif
9755 #ifdef TARGET_NR_munlock
9756     case TARGET_NR_munlock:
9757         ret = get_errno(munlock(g2h(arg1), arg2));
9758         break;
9759 #endif
9760 #ifdef TARGET_NR_mlockall
9761     case TARGET_NR_mlockall:
9762         ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9763         break;
9764 #endif
9765 #ifdef TARGET_NR_munlockall
9766     case TARGET_NR_munlockall:
9767         ret = get_errno(munlockall());
9768         break;
9769 #endif
9770 #ifdef TARGET_NR_truncate
9771     case TARGET_NR_truncate:
9772         if (!(p = lock_user_string(arg1)))
9773             goto efault;
9774         ret = get_errno(truncate(p, arg2));
9775         unlock_user(p, arg1, 0);
9776         break;
9777 #endif
9778 #ifdef TARGET_NR_ftruncate
9779     case TARGET_NR_ftruncate:
9780         ret = get_errno(ftruncate(arg1, arg2));
9781         break;
9782 #endif
9783     case TARGET_NR_fchmod:
9784         ret = get_errno(fchmod(arg1, arg2));
9785         break;
9786 #if defined(TARGET_NR_fchmodat)
9787     case TARGET_NR_fchmodat:
9788         if (!(p = lock_user_string(arg2)))
9789             goto efault;
9790         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9791         unlock_user(p, arg2, 0);
9792         break;
9793 #endif
9794     case TARGET_NR_getpriority:
9795         /* Note that negative values are valid for getpriority, so we must
9796            differentiate based on errno settings.  */
9797         errno = 0;
9798         ret = getpriority(arg1, arg2);
9799         if (ret == -1 && errno != 0) {
9800             ret = -host_to_target_errno(errno);
9801             break;
9802         }
9803 #ifdef TARGET_ALPHA
9804         /* Return value is the unbiased priority.  Signal no error.  */
9805         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9806 #else
9807         /* Return value is a biased priority to avoid negative numbers.  */
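        /* (This mirrors the kernel's sys_getpriority(), which returns
         * 20 - nice; e.g. a nice value of -5 is reported to the guest as 25
         * and converted back by the guest's libc wrapper.)
         */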
9808         ret = 20 - ret;
9809 #endif
9810         break;
9811     case TARGET_NR_setpriority:
9812         ret = get_errno(setpriority(arg1, arg2, arg3));
9813         break;
9814 #ifdef TARGET_NR_profil
9815     case TARGET_NR_profil:
9816         goto unimplemented;
9817 #endif
9818 #ifdef TARGET_NR_statfs
9819     case TARGET_NR_statfs:
9820         if (!(p = lock_user_string(arg1)))
9821             goto efault;
9822         ret = get_errno(statfs(path(p), &stfs));
9823         unlock_user(p, arg1, 0);
9824     convert_statfs:
9825         if (!is_error(ret)) {
9826             struct target_statfs *target_stfs;
9827 
9828             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9829                 goto efault;
9830             __put_user(stfs.f_type, &target_stfs->f_type);
9831             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9832             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9833             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9834             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9835             __put_user(stfs.f_files, &target_stfs->f_files);
9836             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9837             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9838             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9839             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9840             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9841 #ifdef _STATFS_F_FLAGS
9842             __put_user(stfs.f_flags, &target_stfs->f_flags);
9843 #else
9844             __put_user(0, &target_stfs->f_flags);
9845 #endif
9846             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9847             unlock_user_struct(target_stfs, arg2, 1);
9848         }
9849         break;
9850 #endif
9851 #ifdef TARGET_NR_fstatfs
9852     case TARGET_NR_fstatfs:
9853         ret = get_errno(fstatfs(arg1, &stfs));
9854         goto convert_statfs;
9855 #endif
9856 #ifdef TARGET_NR_statfs64
9857     case TARGET_NR_statfs64:
9858         if (!(p = lock_user_string(arg1)))
9859             goto efault;
9860         ret = get_errno(statfs(path(p), &stfs));
9861         unlock_user(p, arg1, 0);
9862     convert_statfs64:
9863         if (!is_error(ret)) {
9864             struct target_statfs64 *target_stfs;
9865 
9866             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9867                 goto efault;
9868             __put_user(stfs.f_type, &target_stfs->f_type);
9869             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9870             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9871             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9872             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9873             __put_user(stfs.f_files, &target_stfs->f_files);
9874             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9875             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9876             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9877             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9878             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9879             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9880             unlock_user_struct(target_stfs, arg3, 1);
9881         }
9882         break;
9883     case TARGET_NR_fstatfs64:
9884         ret = get_errno(fstatfs(arg1, &stfs));
9885         goto convert_statfs64;
9886 #endif
9887 #ifdef TARGET_NR_ioperm
9888     case TARGET_NR_ioperm:
9889         goto unimplemented;
9890 #endif
9891 #ifdef TARGET_NR_socketcall
9892     case TARGET_NR_socketcall:
9893         ret = do_socketcall(arg1, arg2);
9894         break;
9895 #endif
9896 #ifdef TARGET_NR_accept
9897     case TARGET_NR_accept:
9898         ret = do_accept4(arg1, arg2, arg3, 0);
9899         break;
9900 #endif
9901 #ifdef TARGET_NR_accept4
9902     case TARGET_NR_accept4:
9903         ret = do_accept4(arg1, arg2, arg3, arg4);
9904         break;
9905 #endif
9906 #ifdef TARGET_NR_bind
9907     case TARGET_NR_bind:
9908         ret = do_bind(arg1, arg2, arg3);
9909         break;
9910 #endif
9911 #ifdef TARGET_NR_connect
9912     case TARGET_NR_connect:
9913         ret = do_connect(arg1, arg2, arg3);
9914         break;
9915 #endif
9916 #ifdef TARGET_NR_getpeername
9917     case TARGET_NR_getpeername:
9918         ret = do_getpeername(arg1, arg2, arg3);
9919         break;
9920 #endif
9921 #ifdef TARGET_NR_getsockname
9922     case TARGET_NR_getsockname:
9923         ret = do_getsockname(arg1, arg2, arg3);
9924         break;
9925 #endif
9926 #ifdef TARGET_NR_getsockopt
9927     case TARGET_NR_getsockopt:
9928         ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9929         break;
9930 #endif
9931 #ifdef TARGET_NR_listen
9932     case TARGET_NR_listen:
9933         ret = get_errno(listen(arg1, arg2));
9934         break;
9935 #endif
9936 #ifdef TARGET_NR_recv
9937     case TARGET_NR_recv:
9938         ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9939         break;
9940 #endif
9941 #ifdef TARGET_NR_recvfrom
9942     case TARGET_NR_recvfrom:
9943         ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9944         break;
9945 #endif
9946 #ifdef TARGET_NR_recvmsg
9947     case TARGET_NR_recvmsg:
9948         ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9949         break;
9950 #endif
9951 #ifdef TARGET_NR_send
9952     case TARGET_NR_send:
9953         ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9954         break;
9955 #endif
9956 #ifdef TARGET_NR_sendmsg
9957     case TARGET_NR_sendmsg:
9958         ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9959         break;
9960 #endif
9961 #ifdef TARGET_NR_sendmmsg
9962     case TARGET_NR_sendmmsg:
9963         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9964         break;
9965     case TARGET_NR_recvmmsg:
9966         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9967         break;
9968 #endif
9969 #ifdef TARGET_NR_sendto
9970     case TARGET_NR_sendto:
9971         ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9972         break;
9973 #endif
9974 #ifdef TARGET_NR_shutdown
9975     case TARGET_NR_shutdown:
9976         ret = get_errno(shutdown(arg1, arg2));
9977         break;
9978 #endif
9979 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9980     case TARGET_NR_getrandom:
9981         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9982         if (!p) {
9983             goto efault;
9984         }
9985         ret = get_errno(getrandom(p, arg2, arg3));
9986         unlock_user(p, arg1, ret);
9987         break;
9988 #endif
9989 #ifdef TARGET_NR_socket
9990     case TARGET_NR_socket:
9991         ret = do_socket(arg1, arg2, arg3);
9992         break;
9993 #endif
9994 #ifdef TARGET_NR_socketpair
9995     case TARGET_NR_socketpair:
9996         ret = do_socketpair(arg1, arg2, arg3, arg4);
9997         break;
9998 #endif
9999 #ifdef TARGET_NR_setsockopt
10000     case TARGET_NR_setsockopt:
10001         ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10002         break;
10003 #endif
10004 #if defined(TARGET_NR_syslog)
10005     case TARGET_NR_syslog:
10006         {
10007             int len = arg3;
10008 
10009             switch (arg1) {
10010             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10011             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10012             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10013             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10014             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10015             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10016             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10017             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10018                 {
10019                     ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10020                 }
10021                 break;
10022             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10023             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10024             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10025                 {
10026                     ret = -TARGET_EINVAL;
10027                     if (len < 0) {
10028                         goto fail;
10029                     }
10030                     ret = 0;
10031                     if (len == 0) {
10032                         break;
10033                     }
10034                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10035                     if (!p) {
10036                         ret = -TARGET_EFAULT;
10037                         goto fail;
10038                     }
10039                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10040                     unlock_user(p, arg2, arg3);
10041                 }
10042                 break;
10043             default:
10044                 ret = -TARGET_EINVAL;
10045                 break;
10046             }
10047         }
10048         break;
10049 #endif
10050     case TARGET_NR_setitimer:
10051         {
10052             struct itimerval value, ovalue, *pvalue;
10053 
10054             if (arg2) {
10055                 pvalue = &value;
10056                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10057                     || copy_from_user_timeval(&pvalue->it_value,
10058                                               arg2 + sizeof(struct target_timeval)))
10059                     goto efault;
10060             } else {
10061                 pvalue = NULL;
10062             }
10063             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10064             if (!is_error(ret) && arg3) {
10065                 if (copy_to_user_timeval(arg3,
10066                                          &ovalue.it_interval)
10067                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10068                                             &ovalue.it_value))
10069                     goto efault;
10070             }
10071         }
10072         break;
10073     case TARGET_NR_getitimer:
10074         {
10075             struct itimerval value;
10076 
10077             ret = get_errno(getitimer(arg1, &value));
10078             if (!is_error(ret) && arg2) {
10079                 if (copy_to_user_timeval(arg2,
10080                                          &value.it_interval)
10081                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10082                                             &value.it_value))
10083                     goto efault;
10084             }
10085         }
10086         break;
10087 #ifdef TARGET_NR_stat
10088     case TARGET_NR_stat:
10089         if (!(p = lock_user_string(arg1)))
10090             goto efault;
10091         ret = get_errno(stat(path(p), &st));
10092         unlock_user(p, arg1, 0);
10093         goto do_stat;
10094 #endif
10095 #ifdef TARGET_NR_lstat
10096     case TARGET_NR_lstat:
10097         if (!(p = lock_user_string(arg1)))
10098             goto efault;
10099         ret = get_errno(lstat(path(p), &st));
10100         unlock_user(p, arg1, 0);
10101         goto do_stat;
10102 #endif
10103 #ifdef TARGET_NR_fstat
10104     case TARGET_NR_fstat:
10105         {
10106             ret = get_errno(fstat(arg1, &st));
10107 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10108         do_stat:
10109 #endif
10110             if (!is_error(ret)) {
10111                 struct target_stat *target_st;
10112 
10113                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10114                     goto efault;
10115                 memset(target_st, 0, sizeof(*target_st));
10116                 __put_user(st.st_dev, &target_st->st_dev);
10117                 __put_user(st.st_ino, &target_st->st_ino);
10118                 __put_user(st.st_mode, &target_st->st_mode);
10119                 __put_user(st.st_uid, &target_st->st_uid);
10120                 __put_user(st.st_gid, &target_st->st_gid);
10121                 __put_user(st.st_nlink, &target_st->st_nlink);
10122                 __put_user(st.st_rdev, &target_st->st_rdev);
10123                 __put_user(st.st_size, &target_st->st_size);
10124                 __put_user(st.st_blksize, &target_st->st_blksize);
10125                 __put_user(st.st_blocks, &target_st->st_blocks);
10126                 __put_user(st.st_atime, &target_st->target_st_atime);
10127                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10128                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10129                 unlock_user_struct(target_st, arg2, 1);
10130             }
10131         }
10132         break;
10133 #endif
10134 #ifdef TARGET_NR_olduname
10135     case TARGET_NR_olduname:
10136         goto unimplemented;
10137 #endif
10138 #ifdef TARGET_NR_iopl
10139     case TARGET_NR_iopl:
10140         goto unimplemented;
10141 #endif
10142     case TARGET_NR_vhangup:
10143         ret = get_errno(vhangup());
10144         break;
10145 #ifdef TARGET_NR_idle
10146     case TARGET_NR_idle:
10147         goto unimplemented;
10148 #endif
10149 #ifdef TARGET_NR_syscall
10150     case TARGET_NR_syscall:
10151         ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10152                          arg6, arg7, arg8, 0);
10153         break;
10154 #endif
10155     case TARGET_NR_wait4:
10156         {
10157             int status;
10158             abi_long status_ptr = arg2;
10159             struct rusage rusage, *rusage_ptr;
10160             abi_ulong target_rusage = arg4;
10161             abi_long rusage_err;
10162             if (target_rusage)
10163                 rusage_ptr = &rusage;
10164             else
10165                 rusage_ptr = NULL;
10166             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10167             if (!is_error(ret)) {
10168                 if (status_ptr && ret) {
10169                     status = host_to_target_waitstatus(status);
10170                     if (put_user_s32(status, status_ptr))
10171                         goto efault;
10172                 }
10173                 if (target_rusage) {
10174                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10175                     if (rusage_err) {
10176                         ret = rusage_err;
10177                     }
10178                 }
10179             }
10180         }
10181         break;
10182 #ifdef TARGET_NR_swapoff
10183     case TARGET_NR_swapoff:
10184         if (!(p = lock_user_string(arg1)))
10185             goto efault;
10186         ret = get_errno(swapoff(p));
10187         unlock_user(p, arg1, 0);
10188         break;
10189 #endif
10190     case TARGET_NR_sysinfo:
10191         {
10192             struct target_sysinfo *target_value;
10193             struct sysinfo value;
10194             ret = get_errno(sysinfo(&value));
10195             if (!is_error(ret) && arg1)
10196             {
10197                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10198                     goto efault;
10199                 __put_user(value.uptime, &target_value->uptime);
10200                 __put_user(value.loads[0], &target_value->loads[0]);
10201                 __put_user(value.loads[1], &target_value->loads[1]);
10202                 __put_user(value.loads[2], &target_value->loads[2]);
10203                 __put_user(value.totalram, &target_value->totalram);
10204                 __put_user(value.freeram, &target_value->freeram);
10205                 __put_user(value.sharedram, &target_value->sharedram);
10206                 __put_user(value.bufferram, &target_value->bufferram);
10207                 __put_user(value.totalswap, &target_value->totalswap);
10208                 __put_user(value.freeswap, &target_value->freeswap);
10209                 __put_user(value.procs, &target_value->procs);
10210                 __put_user(value.totalhigh, &target_value->totalhigh);
10211                 __put_user(value.freehigh, &target_value->freehigh);
10212                 __put_user(value.mem_unit, &target_value->mem_unit);
10213                 unlock_user_struct(target_value, arg1, 1);
10214             }
10215         }
10216         break;
10217 #ifdef TARGET_NR_ipc
10218     case TARGET_NR_ipc:
10219         ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10220         break;
10221 #endif
10222 #ifdef TARGET_NR_semget
10223     case TARGET_NR_semget:
10224         ret = get_errno(semget(arg1, arg2, arg3));
10225         break;
10226 #endif
10227 #ifdef TARGET_NR_semop
10228     case TARGET_NR_semop:
10229         ret = do_semop(arg1, arg2, arg3);
10230         break;
10231 #endif
10232 #ifdef TARGET_NR_semctl
10233     case TARGET_NR_semctl:
10234         ret = do_semctl(arg1, arg2, arg3, arg4);
10235         break;
10236 #endif
10237 #ifdef TARGET_NR_msgctl
10238     case TARGET_NR_msgctl:
10239         ret = do_msgctl(arg1, arg2, arg3);
10240         break;
10241 #endif
10242 #ifdef TARGET_NR_msgget
10243     case TARGET_NR_msgget:
10244         ret = get_errno(msgget(arg1, arg2));
10245         break;
10246 #endif
10247 #ifdef TARGET_NR_msgrcv
10248     case TARGET_NR_msgrcv:
10249         ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10250         break;
10251 #endif
10252 #ifdef TARGET_NR_msgsnd
10253     case TARGET_NR_msgsnd:
10254         ret = do_msgsnd(arg1, arg2, arg3, arg4);
10255         break;
10256 #endif
10257 #ifdef TARGET_NR_shmget
10258     case TARGET_NR_shmget:
10259         ret = get_errno(shmget(arg1, arg2, arg3));
10260         break;
10261 #endif
10262 #ifdef TARGET_NR_shmctl
10263     case TARGET_NR_shmctl:
10264         ret = do_shmctl(arg1, arg2, arg3);
10265         break;
10266 #endif
10267 #ifdef TARGET_NR_shmat
10268     case TARGET_NR_shmat:
10269         ret = do_shmat(cpu_env, arg1, arg2, arg3);
10270         break;
10271 #endif
10272 #ifdef TARGET_NR_shmdt
10273     case TARGET_NR_shmdt:
10274         ret = do_shmdt(arg1);
10275         break;
10276 #endif
10277     case TARGET_NR_fsync:
10278         ret = get_errno(fsync(arg1));
10279         break;
10280     case TARGET_NR_clone:
10281         /* Linux manages to have three different orderings for its
10282          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10283          * match the kernel's CONFIG_CLONE_* settings.
10284          * Microblaze is further special in that it uses a sixth
10285          * implicit argument to clone for the TLS pointer.
10286          */
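        /* For reference, the raw argument orders are roughly:
         *   default:     flags, newsp, parent_tidptr, child_tidptr, tls
         *   BACKWARDS:   flags, newsp, parent_tidptr, tls, child_tidptr
         *   BACKWARDS2:  newsp, flags, parent_tidptr, child_tidptr, tls
         * which is why the do_fork() calls below permute arg1..arg5.
         */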
10287 #if defined(TARGET_MICROBLAZE)
10288         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10289 #elif defined(TARGET_CLONE_BACKWARDS)
10290         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10291 #elif defined(TARGET_CLONE_BACKWARDS2)
10292         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10293 #else
10294         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10295 #endif
10296         break;
10297 #ifdef __NR_exit_group
10298         /* new thread calls */
10299     case TARGET_NR_exit_group:
10300         preexit_cleanup(cpu_env, arg1);
10301         ret = get_errno(exit_group(arg1));
10302         break;
10303 #endif
10304     case TARGET_NR_setdomainname:
10305         if (!(p = lock_user_string(arg1)))
10306             goto efault;
10307         ret = get_errno(setdomainname(p, arg2));
10308         unlock_user(p, arg1, 0);
10309         break;
10310     case TARGET_NR_uname:
10311         /* No need to transcode because we use the Linux syscall */
10312         {
10313             struct new_utsname * buf;
10314 
10315             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10316                 goto efault;
10317             ret = get_errno(sys_uname(buf));
10318             if (!is_error(ret)) {
10319                 /* Overwrite the native machine name with whatever is being
10320                    emulated. */
10321                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10322                           sizeof(buf->machine));
10323                 /* Allow the user to override the reported release.  */
10324                 if (qemu_uname_release && *qemu_uname_release) {
10325                     g_strlcpy(buf->release, qemu_uname_release,
10326                               sizeof(buf->release));
10327                 }
10328             }
10329             unlock_user_struct(buf, arg1, 1);
10330         }
10331         break;
10332 #ifdef TARGET_I386
10333     case TARGET_NR_modify_ldt:
10334         ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10335         break;
10336 #if !defined(TARGET_X86_64)
10337     case TARGET_NR_vm86old:
10338         goto unimplemented;
10339     case TARGET_NR_vm86:
10340         ret = do_vm86(cpu_env, arg1, arg2);
10341         break;
10342 #endif
10343 #endif
10344     case TARGET_NR_adjtimex:
10345         {
10346             struct timex host_buf;
10347 
10348             if (target_to_host_timex(&host_buf, arg1) != 0) {
10349                 goto efault;
10350             }
10351             ret = get_errno(adjtimex(&host_buf));
10352             if (!is_error(ret)) {
10353                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10354                     goto efault;
10355                 }
10356             }
10357         }
10358         break;
10359 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10360     case TARGET_NR_clock_adjtime:
10361         {
10362             struct timex htx, *phtx = &htx;
10363 
10364             if (target_to_host_timex(phtx, arg2) != 0) {
10365                 goto efault;
10366             }
10367             ret = get_errno(clock_adjtime(arg1, phtx));
10368             if (!is_error(ret) && phtx) {
10369                 if (host_to_target_timex(arg2, phtx) != 0) {
10370                     goto efault;
10371                 }
10372             }
10373         }
10374         break;
10375 #endif
10376 #ifdef TARGET_NR_create_module
10377     case TARGET_NR_create_module:
10378 #endif
10379     case TARGET_NR_init_module:
10380     case TARGET_NR_delete_module:
10381 #ifdef TARGET_NR_get_kernel_syms
10382     case TARGET_NR_get_kernel_syms:
10383 #endif
10384         goto unimplemented;
10385     case TARGET_NR_quotactl:
10386         goto unimplemented;
10387     case TARGET_NR_getpgid:
10388         ret = get_errno(getpgid(arg1));
10389         break;
10390     case TARGET_NR_fchdir:
10391         ret = get_errno(fchdir(arg1));
10392         break;
10393 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10394     case TARGET_NR_bdflush:
10395         goto unimplemented;
10396 #endif
10397 #ifdef TARGET_NR_sysfs
10398     case TARGET_NR_sysfs:
10399         goto unimplemented;
10400 #endif
10401     case TARGET_NR_personality:
10402         ret = get_errno(personality(arg1));
10403         break;
10404 #ifdef TARGET_NR_afs_syscall
10405     case TARGET_NR_afs_syscall:
10406         goto unimplemented;
10407 #endif
10408 #ifdef TARGET_NR__llseek /* Not on alpha */
10409     case TARGET_NR__llseek:
10410         {
10411             int64_t res;
10412 #if !defined(__NR_llseek)
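            /* No host _llseek (e.g. a 64-bit host): compose the 64-bit
             * offset from the guest's high (arg2) and low (arg3) words and
             * use plain lseek() instead.
             */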
10413             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10414             if (res == -1) {
10415                 ret = get_errno(res);
10416             } else {
10417                 ret = 0;
10418             }
10419 #else
10420             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10421 #endif
10422             if ((ret == 0) && put_user_s64(res, arg4)) {
10423                 goto efault;
10424             }
10425         }
10426         break;
10427 #endif
10428 #ifdef TARGET_NR_getdents
10429     case TARGET_NR_getdents:
10430 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10431 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10432         {
10433             struct target_dirent *target_dirp;
10434             struct linux_dirent *dirp;
10435             abi_long count = arg3;
10436 
10437             dirp = g_try_malloc(count);
10438             if (!dirp) {
10439                 ret = -TARGET_ENOMEM;
10440                 goto fail;
10441             }
10442 
10443             ret = get_errno(sys_getdents(arg1, dirp, count));
10444             if (!is_error(ret)) {
10445                 struct linux_dirent *de;
10446                 struct target_dirent *tde;
10447                 int len = ret;
10448                 int reclen, treclen;
10449                 int count1, tnamelen;
10450 
10451                 count1 = 0;
10452                 de = dirp;
10453                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10454                     goto efault;
10455                 tde = target_dirp;
10456                 while (len > 0) {
10457                     reclen = de->d_reclen;
10458                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10459                     assert(tnamelen >= 0);
10460                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10461                     assert(count1 + treclen <= count);
10462                     tde->d_reclen = tswap16(treclen);
10463                     tde->d_ino = tswapal(de->d_ino);
10464                     tde->d_off = tswapal(de->d_off);
10465                     memcpy(tde->d_name, de->d_name, tnamelen);
10466                     de = (struct linux_dirent *)((char *)de + reclen);
10467                     len -= reclen;
10468                     tde = (struct target_dirent *)((char *)tde + treclen);
10469                     count1 += treclen;
10470                 }
10471                 ret = count1;
10472                 unlock_user(target_dirp, arg2, ret);
10473             }
10474             g_free(dirp);
10475         }
10476 #else
10477         {
10478             struct linux_dirent *dirp;
10479             abi_long count = arg3;
10480 
10481             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10482                 goto efault;
10483             ret = get_errno(sys_getdents(arg1, dirp, count));
10484             if (!is_error(ret)) {
10485                 struct linux_dirent *de;
10486                 int len = ret;
10487                 int reclen;
10488                 de = dirp;
10489                 while (len > 0) {
10490                     reclen = de->d_reclen;
10491                     if (reclen > len)
10492                         break;
10493                     de->d_reclen = tswap16(reclen);
10494                     tswapls(&de->d_ino);
10495                     tswapls(&de->d_off);
10496                     de = (struct linux_dirent *)((char *)de + reclen);
10497                     len -= reclen;
10498                 }
10499             }
10500             unlock_user(dirp, arg2, ret);
10501         }
10502 #endif
10503 #else
10504         /* Implement getdents in terms of getdents64 */
10505         {
10506             struct linux_dirent64 *dirp;
10507             abi_long count = arg3;
10508 
10509             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10510             if (!dirp) {
10511                 goto efault;
10512             }
10513             ret = get_errno(sys_getdents64(arg1, dirp, count));
10514             if (!is_error(ret)) {
10515                 /* Convert the dirent64 structs to target dirent.  We do this
10516                  * in-place, since we can guarantee that a target_dirent is no
10517                  * larger than a dirent64; however this means we have to be
10518                  * careful to read everything before writing in the new format.
10519                  */
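                /* The layouts involved are roughly:
                 *     struct linux_dirent64 { u64 ino; s64 off; u16 reclen;
                 *                             u8 type; char name[]; };
                 *     struct target_dirent  { abi_long ino, off; u16 reclen;
                 *                             char name[]; };
                 * with d_type stored in the final byte of each target record,
                 * so a converted record never grows.
                 */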
10520                 struct linux_dirent64 *de;
10521                 struct target_dirent *tde;
10522                 int len = ret;
10523                 int tlen = 0;
10524 
10525                 de = dirp;
10526                 tde = (struct target_dirent *)dirp;
10527                 while (len > 0) {
10528                     int namelen, treclen;
10529                     int reclen = de->d_reclen;
10530                     uint64_t ino = de->d_ino;
10531                     int64_t off = de->d_off;
10532                     uint8_t type = de->d_type;
10533 
10534                     namelen = strlen(de->d_name);
10535                     treclen = offsetof(struct target_dirent, d_name)
10536                         + namelen + 2;
10537                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10538 
10539                     memmove(tde->d_name, de->d_name, namelen + 1);
10540                     tde->d_ino = tswapal(ino);
10541                     tde->d_off = tswapal(off);
10542                     tde->d_reclen = tswap16(treclen);
10543                     /* The target_dirent type is in what was formerly a padding
10544                      * byte at the end of the structure:
10545                      */
10546                     *(((char *)tde) + treclen - 1) = type;
10547 
10548                     de = (struct linux_dirent64 *)((char *)de + reclen);
10549                     tde = (struct target_dirent *)((char *)tde + treclen);
10550                     len -= reclen;
10551                     tlen += treclen;
10552                 }
10553                 ret = tlen;
10554             }
10555             unlock_user(dirp, arg2, ret);
10556         }
10557 #endif
10558         break;
10559 #endif /* TARGET_NR_getdents */
10560 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10561     case TARGET_NR_getdents64:
10562         {
10563             struct linux_dirent64 *dirp;
10564             abi_long count = arg3;
10565             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10566                 goto efault;
10567             ret = get_errno(sys_getdents64(arg1, dirp, count));
10568             if (!is_error(ret)) {
10569                 struct linux_dirent64 *de;
10570                 int len = ret;
10571                 int reclen;
10572                 de = dirp;
10573                 while (len > 0) {
10574                     reclen = de->d_reclen;
10575                     if (reclen > len)
10576                         break;
10577                     de->d_reclen = tswap16(reclen);
10578                     tswap64s((uint64_t *)&de->d_ino);
10579                     tswap64s((uint64_t *)&de->d_off);
10580                     de = (struct linux_dirent64 *)((char *)de + reclen);
10581                     len -= reclen;
10582                 }
10583             }
10584             unlock_user(dirp, arg2, ret);
10585         }
10586         break;
10587 #endif /* TARGET_NR_getdents64 */
10588 #if defined(TARGET_NR__newselect)
10589     case TARGET_NR__newselect:
10590         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10591         break;
10592 #endif
10593 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10594 # ifdef TARGET_NR_poll
10595     case TARGET_NR_poll:
10596 # endif
10597 # ifdef TARGET_NR_ppoll
10598     case TARGET_NR_ppoll:
10599 # endif
10600         {
10601             struct target_pollfd *target_pfd;
10602             unsigned int nfds = arg2;
10603             struct pollfd *pfd;
10604             unsigned int i;
10605 
10606             pfd = NULL;
10607             target_pfd = NULL;
10608             if (nfds) {
10609                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10610                     ret = -TARGET_EINVAL;
10611                     break;
10612                 }
10613 
10614                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10615                                        sizeof(struct target_pollfd) * nfds, 1);
10616                 if (!target_pfd) {
10617                     goto efault;
10618                 }
10619 
10620                 pfd = alloca(sizeof(struct pollfd) * nfds);
10621                 for (i = 0; i < nfds; i++) {
10622                     pfd[i].fd = tswap32(target_pfd[i].fd);
10623                     pfd[i].events = tswap16(target_pfd[i].events);
10624                 }
10625             }
10626 
10627             switch (num) {
10628 # ifdef TARGET_NR_ppoll
10629             case TARGET_NR_ppoll:
10630             {
10631                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10632                 target_sigset_t *target_set;
10633                 sigset_t _set, *set = &_set;
10634 
10635                 if (arg3) {
10636                     if (target_to_host_timespec(timeout_ts, arg3)) {
10637                         unlock_user(target_pfd, arg1, 0);
10638                         goto efault;
10639                     }
10640                 } else {
10641                     timeout_ts = NULL;
10642                 }
10643 
10644                 if (arg4) {
10645                     if (arg5 != sizeof(target_sigset_t)) {
10646                         unlock_user(target_pfd, arg1, 0);
10647                         ret = -TARGET_EINVAL;
10648                         break;
10649                     }
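                    /* (As with pselect6, the kernel requires the sigsetsize
                     * argument to match exactly, so e.g. a guest passing 4
                     * where the target sigset is 8 bytes gets -EINVAL rather
                     * than a silently truncated mask.)
                     */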
10650 
10651                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10652                     if (!target_set) {
10653                         unlock_user(target_pfd, arg1, 0);
10654                         goto efault;
10655                     }
10656                     target_to_host_sigset(set, target_set);
10657                 } else {
10658                     set = NULL;
10659                 }
10660 
10661                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10662                                            set, SIGSET_T_SIZE));
10663 
10664                 if (!is_error(ret) && arg3) {
10665                     host_to_target_timespec(arg3, timeout_ts);
10666                 }
10667                 if (arg4) {
10668                     unlock_user(target_set, arg4, 0);
10669                 }
10670                 break;
10671             }
10672 # endif
10673 # ifdef TARGET_NR_poll
10674             case TARGET_NR_poll:
10675             {
10676                 struct timespec ts, *pts;
10677 
10678                 if (arg3 >= 0) {
10679                     /* Convert ms to secs, ns */
10680                     ts.tv_sec = arg3 / 1000;
10681                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10682                     pts = &ts;
10683                 } else {
10684                     /* -ve poll() timeout means "infinite" */
10685                     pts = NULL;
10686                 }
10687                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10688                 break;
10689             }
10690 # endif
10691             default:
10692                 g_assert_not_reached();
10693             }
10694 
10695             if (!is_error(ret)) {
10696                 for(i = 0; i < nfds; i++) {
10697                     target_pfd[i].revents = tswap16(pfd[i].revents);
10698                 }
10699             }
10700             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10701         }
10702         break;
10703 #endif
10704     case TARGET_NR_flock:
10705         /* NOTE: the flock constant seems to be the same for every
10706            Linux platform */
10707         ret = get_errno(safe_flock(arg1, arg2));
10708         break;
10709     case TARGET_NR_readv:
10710         {
10711             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10712             if (vec != NULL) {
10713                 ret = get_errno(safe_readv(arg1, vec, arg3));
10714                 unlock_iovec(vec, arg2, arg3, 1);
10715             } else {
10716                 ret = -host_to_target_errno(errno);
10717             }
10718         }
10719         break;
10720     case TARGET_NR_writev:
10721         {
10722             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10723             if (vec != NULL) {
10724                 ret = get_errno(safe_writev(arg1, vec, arg3));
10725                 unlock_iovec(vec, arg2, arg3, 0);
10726             } else {
10727                 ret = -host_to_target_errno(errno);
10728             }
10729         }
10730         break;
10731 #if defined(TARGET_NR_preadv)
10732     case TARGET_NR_preadv:
10733         {
10734             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10735             if (vec != NULL) {
10736                 unsigned long low, high;
10737 
10738                 target_to_host_low_high(arg4, arg5, &low, &high);
10739                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10740                 unlock_iovec(vec, arg2, arg3, 1);
10741             } else {
10742                 ret = -host_to_target_errno(errno);
10743            }
10744         }
10745         break;
10746 #endif
10747 #if defined(TARGET_NR_pwritev)
10748     case TARGET_NR_pwritev:
10749         {
10750             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10751             if (vec != NULL) {
10752                 unsigned long low, high;
10753 
10754                 target_to_host_low_high(arg4, arg5, &low, &high);
10755                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10756                 unlock_iovec(vec, arg2, arg3, 0);
10757             } else {
10758                 ret = -host_to_target_errno(errno);
10759            }
10760         }
10761         break;
10762 #endif
10763     case TARGET_NR_getsid:
10764         ret = get_errno(getsid(arg1));
10765         break;
10766 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10767     case TARGET_NR_fdatasync:
10768         ret = get_errno(fdatasync(arg1));
10769         break;
10770 #endif
10771 #ifdef TARGET_NR__sysctl
10772     case TARGET_NR__sysctl:
10773         /* We don't implement this, but ENOTDIR is always a safe
10774            return value. */
10775         ret = -TARGET_ENOTDIR;
10776         break;
10777 #endif
10778     case TARGET_NR_sched_getaffinity:
10779         {
10780             unsigned int mask_size;
10781             unsigned long *mask;
10782 
10783             /*
10784              * sched_getaffinity needs multiples of ulong, so we need to take
10785              * care of mismatches between target ulong and host ulong sizes.
10786              */
10787             if (arg2 & (sizeof(abi_ulong) - 1)) {
10788                 ret = -TARGET_EINVAL;
10789                 break;
10790             }
10791             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10792 
10793             mask = alloca(mask_size);
10794             memset(mask, 0, mask_size);
10795             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10796 
10797             if (!is_error(ret)) {
10798                 if (ret > arg2) {
10799                     /* More data returned than the caller's buffer will fit.
10800                      * This only happens if sizeof(abi_long) < sizeof(long)
10801                      * and the caller passed us a buffer holding an odd number
10802                      * of abi_longs. If the host kernel is actually using the
10803                      * extra 4 bytes then fail EINVAL; otherwise we can just
10804                      * ignore them and only copy the interesting part.
10805                      */
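                    /* (For example, a 32-bit guest passing arg2 == 12 on a
                     * 64-bit host has mask_size rounded up to 16; if the
                     * kernel reports 16 bytes used, we only fail when there
                     * really are more than 96 CPUs configured.)
                     */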
10806                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10807                     if (numcpus > arg2 * 8) {
10808                         ret = -TARGET_EINVAL;
10809                         break;
10810                     }
10811                     ret = arg2;
10812                 }
10813 
10814                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10815                     goto efault;
10816                 }
10817             }
10818         }
10819         break;
10820     case TARGET_NR_sched_setaffinity:
10821         {
10822             unsigned int mask_size;
10823             unsigned long *mask;
10824 
10825             /*
10826              * sched_setaffinity needs multiples of ulong, so we need to take
10827              * care of mismatches between target ulong and host ulong sizes.
10828              */
10829             if (arg2 & (sizeof(abi_ulong) - 1)) {
10830                 ret = -TARGET_EINVAL;
10831                 break;
10832             }
10833             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10834             mask = alloca(mask_size);
10835 
10836             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10837             if (ret) {
10838                 break;
10839             }
10840 
10841             ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10842         }
10843         break;
10844     case TARGET_NR_getcpu:
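    /*
     * For illustration only (hypothetical guest code, not part of QEMU):
     * a guest usually reaches the two affinity cases above via the libc
     * wrappers, e.g.
     *
     *     cpu_set_t set;
     *     CPU_ZERO(&set);
     *     CPU_SET(1, &set);
     *     sched_setaffinity(0, sizeof(set), &set);   // arg2 == sizeof(set)
     *
     * and the mask is translated by target_to_host_cpu_mask() before the
     * host syscall runs.
     */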
10845         {
10846             unsigned cpu, node;
10847             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10848                                        arg2 ? &node : NULL,
10849                                        NULL));
10850             if (is_error(ret)) {
10851                 goto fail;
10852             }
10853             if (arg1 && put_user_u32(cpu, arg1)) {
10854                 goto efault;
10855             }
10856             if (arg2 && put_user_u32(node, arg2)) {
10857                 goto efault;
10858             }
10859         }
10860         break;
10861     case TARGET_NR_sched_setparam:
10862         {
10863             struct sched_param *target_schp;
10864             struct sched_param schp;
10865 
10866             if (arg2 == 0) {
10867                 return -TARGET_EINVAL;
10868             }
10869             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10870                 goto efault;
10871             schp.sched_priority = tswap32(target_schp->sched_priority);
10872             unlock_user_struct(target_schp, arg2, 0);
10873             ret = get_errno(sched_setparam(arg1, &schp));
10874         }
10875         break;
10876     case TARGET_NR_sched_getparam:
10877         {
10878             struct sched_param *target_schp;
10879             struct sched_param schp;
10880 
10881             if (arg2 == 0) {
10882                 return -TARGET_EINVAL;
10883             }
10884             ret = get_errno(sched_getparam(arg1, &schp));
10885             if (!is_error(ret)) {
10886                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10887                     goto efault;
10888                 target_schp->sched_priority = tswap32(schp.sched_priority);
10889                 unlock_user_struct(target_schp, arg2, 1);
10890             }
10891         }
10892         break;
10893     case TARGET_NR_sched_setscheduler:
10894         {
10895             struct sched_param *target_schp;
10896             struct sched_param schp;
10897             if (arg3 == 0) {
10898                 return -TARGET_EINVAL;
10899             }
10900             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10901                 goto efault;
10902             schp.sched_priority = tswap32(target_schp->sched_priority);
10903             unlock_user_struct(target_schp, arg3, 0);
10904             ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10905         }
10906         break;
10907     case TARGET_NR_sched_getscheduler:
10908         ret = get_errno(sched_getscheduler(arg1));
10909         break;
10910     case TARGET_NR_sched_yield:
10911         ret = get_errno(sched_yield());
10912         break;
10913     case TARGET_NR_sched_get_priority_max:
10914         ret = get_errno(sched_get_priority_max(arg1));
10915         break;
10916     case TARGET_NR_sched_get_priority_min:
10917         ret = get_errno(sched_get_priority_min(arg1));
10918         break;
10919     case TARGET_NR_sched_rr_get_interval:
10920         {
10921             struct timespec ts;
10922             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10923             if (!is_error(ret)) {
10924                 ret = host_to_target_timespec(arg2, &ts);
10925             }
10926         }
10927         break;
10928     case TARGET_NR_nanosleep:
10929         {
10930             struct timespec req, rem;
10931             target_to_host_timespec(&req, arg1);
10932             ret = get_errno(safe_nanosleep(&req, &rem));
10933             if (is_error(ret) && arg2) {
10934                 host_to_target_timespec(arg2, &rem);
10935             }
10936         }
10937         break;
10938 #ifdef TARGET_NR_query_module
10939     case TARGET_NR_query_module:
10940         goto unimplemented;
10941 #endif
10942 #ifdef TARGET_NR_nfsservctl
10943     case TARGET_NR_nfsservctl:
10944         goto unimplemented;
10945 #endif
10946     case TARGET_NR_prctl:
10947         switch (arg1) {
10948         case PR_GET_PDEATHSIG:
10949         {
10950             int deathsig;
10951             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10952             if (!is_error(ret) && arg2
10953                 && put_user_ual(deathsig, arg2)) {
10954                 goto efault;
10955             }
10956             break;
10957         }
10958 #ifdef PR_GET_NAME
10959         case PR_GET_NAME:
10960         {
10961             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10962             if (!name) {
10963                 goto efault;
10964             }
10965             ret = get_errno(prctl(arg1, (unsigned long)name,
10966                                   arg3, arg4, arg5));
10967             unlock_user(name, arg2, 16);
10968             break;
10969         }
10970         case PR_SET_NAME:
10971         {
10972             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10973             if (!name) {
10974                 goto efault;
10975             }
10976             ret = get_errno(prctl(arg1, (unsigned long)name,
10977                                   arg3, arg4, arg5));
10978             unlock_user(name, arg2, 0);
10979             break;
10980         }
10981 #endif
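        /*
         * Illustration (hypothetical guest code): PR_SET_NAME/PR_GET_NAME
         * operate on a fixed 16-byte buffer (TASK_COMM_LEN), which is why
         * exactly 16 bytes are locked above:
         *
         *     prctl(PR_SET_NAME, "worker-0", 0, 0, 0);
         *
         *     char name[16];
         *     prctl(PR_GET_NAME, name, 0, 0, 0);
         */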
10982 #ifdef TARGET_AARCH64
10983         case TARGET_PR_SVE_SET_VL:
10984             /*
10985              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10986              * PR_SVE_VL_INHERIT.  Note the kernel definition
10987              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10988              * even though the current architectural maximum is VQ=16.
10989              */
10990             ret = -TARGET_EINVAL;
10991             if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10992                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10993                 CPUARMState *env = cpu_env;
10994                 ARMCPU *cpu = arm_env_get_cpu(env);
10995                 uint32_t vq, old_vq;
10996 
10997                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10998                 vq = MAX(arg2 / 16, 1);
10999                 vq = MIN(vq, cpu->sve_max_vq);
11000 
11001                 if (vq < old_vq) {
11002                     aarch64_sve_narrow_vq(env, vq);
11003                 }
11004                 env->vfp.zcr_el[1] = vq - 1;
11005                 ret = vq * 16;
11006             }
11007             break;
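        /*
         * Worked example of the VL/VQ arithmetic above (illustration only):
         * a request of arg2 == 32 bytes gives vq = 32 / 16 = 2; provided the
         * CPU's sve_max_vq is at least 2, zcr_el[1] becomes vq - 1 == 1 and
         * the call returns vq * 16 == 32, the vector length now in effect.
         */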
11008         case TARGET_PR_SVE_GET_VL:
11009             ret = -TARGET_EINVAL;
11010             if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
11011                 CPUARMState *env = cpu_env;
11012                 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
11013             }
11014             break;
11015 #endif /* AARCH64 */
11016         case PR_GET_SECCOMP:
11017         case PR_SET_SECCOMP:
11018             /* Disable seccomp to prevent the target from disabling
11019              * syscalls we need. */
11020             ret = -TARGET_EINVAL;
11021             break;
11022         default:
11023             /* Most prctl options have no pointer arguments */
11024             ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
11025             break;
11026         }
11027         break;
11028 #ifdef TARGET_NR_arch_prctl
11029     case TARGET_NR_arch_prctl:
11030 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
11031         ret = do_arch_prctl(cpu_env, arg1, arg2);
11032         break;
11033 #else
11034         goto unimplemented;
11035 #endif
11036 #endif
11037 #ifdef TARGET_NR_pread64
11038     case TARGET_NR_pread64:
11039         if (regpairs_aligned(cpu_env, num)) {
11040             arg4 = arg5;
11041             arg5 = arg6;
11042         }
11043         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
11044             goto efault;
11045         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11046         unlock_user(p, arg2, ret);
11047         break;
11048     case TARGET_NR_pwrite64:
11049         if (regpairs_aligned(cpu_env, num)) {
11050             arg4 = arg5;
11051             arg5 = arg6;
11052         }
11053         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
11054             goto efault;
11055         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11056         unlock_user(p, arg2, 0);
11057         break;
11058 #endif
11059     case TARGET_NR_getcwd:
11060         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11061             goto efault;
11062         ret = get_errno(sys_getcwd1(p, arg2));
11063         unlock_user(p, arg1, ret);
11064         break;
11065     case TARGET_NR_capget:
11066     case TARGET_NR_capset:
11067     {
11068         struct target_user_cap_header *target_header;
11069         struct target_user_cap_data *target_data = NULL;
11070         struct __user_cap_header_struct header;
11071         struct __user_cap_data_struct data[2];
11072         struct __user_cap_data_struct *dataptr = NULL;
11073         int i, target_datalen;
11074         int data_items = 1;
11075 
11076         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11077             goto efault;
11078         }
11079         header.version = tswap32(target_header->version);
11080         header.pid = tswap32(target_header->pid);
11081 
11082         if (header.version != _LINUX_CAPABILITY_VERSION) {
11083             /* Versions 2 and up take a pointer to two user_data structs */
11084             data_items = 2;
11085         }
11086 
11087         target_datalen = sizeof(*target_data) * data_items;
11088 
11089         if (arg2) {
11090             if (num == TARGET_NR_capget) {
11091                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11092             } else {
11093                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11094             }
11095             if (!target_data) {
11096                 unlock_user_struct(target_header, arg1, 0);
11097                 goto efault;
11098             }
11099 
11100             if (num == TARGET_NR_capset) {
11101                 for (i = 0; i < data_items; i++) {
11102                     data[i].effective = tswap32(target_data[i].effective);
11103                     data[i].permitted = tswap32(target_data[i].permitted);
11104                     data[i].inheritable = tswap32(target_data[i].inheritable);
11105                 }
11106             }
11107 
11108             dataptr = data;
11109         }
11110 
11111         if (num == TARGET_NR_capget) {
11112             ret = get_errno(capget(&header, dataptr));
11113         } else {
11114             ret = get_errno(capset(&header, dataptr));
11115         }
11116 
11117         /* The kernel always updates version for both capget and capset */
11118         target_header->version = tswap32(header.version);
11119         unlock_user_struct(target_header, arg1, 1);
11120 
11121         if (arg2) {
11122             if (num == TARGET_NR_capget) {
11123                 for (i = 0; i < data_items; i++) {
11124                     target_data[i].effective = tswap32(data[i].effective);
11125                     target_data[i].permitted = tswap32(data[i].permitted);
11126                     target_data[i].inheritable = tswap32(data[i].inheritable);
11127                 }
11128                 unlock_user(target_data, arg2, target_datalen);
11129             } else {
11130                 unlock_user(target_data, arg2, 0);
11131             }
11132         }
11133         break;
11134     }
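    /*
     * Illustration (hypothetical guest code): a version-3 capget call that
     * exercises the two-element data path handled above:
     *
     *     struct __user_cap_header_struct hdr = {
     *         .version = _LINUX_CAPABILITY_VERSION_3,
     *         .pid = 0,
     *     };
     *     struct __user_cap_data_struct data[2];
     *     capget(&hdr, data);    // kernel may rewrite hdr.version
     *
     * Because hdr.version != _LINUX_CAPABILITY_VERSION (v1), data_items
     * above is 2.
     */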
11135     case TARGET_NR_sigaltstack:
11136         ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
11137         break;
11138 
11139 #ifdef CONFIG_SENDFILE
11140 #ifdef TARGET_NR_sendfile
11141     case TARGET_NR_sendfile:
11142     {
11143         off_t *offp = NULL;
11144         off_t off;
11145         if (arg3) {
11146             ret = get_user_sal(off, arg3);
11147             if (is_error(ret)) {
11148                 break;
11149             }
11150             offp = &off;
11151         }
11152         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11153         if (!is_error(ret) && arg3) {
11154             abi_long ret2 = put_user_sal(off, arg3);
11155             if (is_error(ret2)) {
11156                 ret = ret2;
11157             }
11158         }
11159         break;
11160     }
11161 #endif
11162 #ifdef TARGET_NR_sendfile64
11163     case TARGET_NR_sendfile64:
11164     {
11165         off_t *offp = NULL;
11166         off_t off;
11167         if (arg3) {
11168             ret = get_user_s64(off, arg3);
11169             if (is_error(ret)) {
11170                 break;
11171             }
11172             offp = &off;
11173         }
11174         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11175         if (!is_error(ret) && arg3) {
11176             abi_long ret2 = put_user_s64(off, arg3);
11177             if (is_error(ret2)) {
11178                 ret = ret2;
11179             }
11180         }
11181         break;
11182     }
11183 #endif
11184 #else
11185     case TARGET_NR_sendfile:
11186 #ifdef TARGET_NR_sendfile64
11187     case TARGET_NR_sendfile64:
11188 #endif
11189         goto unimplemented;
11190 #endif
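    /*
     * Illustration (hypothetical guest code) of the offset handling above:
     *
     *     off_t off = 0;
     *     ssize_t n = sendfile(out_fd, in_fd, &off, 65536);
     *
     * On success the kernel advances 'off', which is why the emulation
     * copies it back to guest memory whenever arg3 is non-zero.
     */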
11191 
11192 #ifdef TARGET_NR_getpmsg
11193     case TARGET_NR_getpmsg:
11194         goto unimplemented;
11195 #endif
11196 #ifdef TARGET_NR_putpmsg
11197     case TARGET_NR_putpmsg:
11198         goto unimplemented;
11199 #endif
11200 #ifdef TARGET_NR_vfork
11201     case TARGET_NR_vfork:
11202         ret = get_errno(do_fork(cpu_env,
11203                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11204                         0, 0, 0, 0));
11205         break;
11206 #endif
11207 #ifdef TARGET_NR_ugetrlimit
11208     case TARGET_NR_ugetrlimit:
11209     {
11210         struct rlimit rlim;
11211         int resource = target_to_host_resource(arg1);
11212         ret = get_errno(getrlimit(resource, &rlim));
11213         if (!is_error(ret)) {
11214             struct target_rlimit *target_rlim;
11215             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11216                 goto efault;
11217             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11218             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11219             unlock_user_struct(target_rlim, arg2, 1);
11220         }
11221         break;
11222     }
11223 #endif
11224 #ifdef TARGET_NR_truncate64
11225     case TARGET_NR_truncate64:
11226         if (!(p = lock_user_string(arg1)))
11227             goto efault;
11228         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11229         unlock_user(p, arg1, 0);
11230         break;
11231 #endif
11232 #ifdef TARGET_NR_ftruncate64
11233     case TARGET_NR_ftruncate64:
11234         ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11235         break;
11236 #endif
11237 #ifdef TARGET_NR_stat64
11238     case TARGET_NR_stat64:
11239         if (!(p = lock_user_string(arg1)))
11240             goto efault;
11241         ret = get_errno(stat(path(p), &st));
11242         unlock_user(p, arg1, 0);
11243         if (!is_error(ret))
11244             ret = host_to_target_stat64(cpu_env, arg2, &st);
11245         break;
11246 #endif
11247 #ifdef TARGET_NR_lstat64
11248     case TARGET_NR_lstat64:
11249         if (!(p = lock_user_string(arg1)))
11250             goto efault;
11251         ret = get_errno(lstat(path(p), &st));
11252         unlock_user(p, arg1, 0);
11253         if (!is_error(ret))
11254             ret = host_to_target_stat64(cpu_env, arg2, &st);
11255         break;
11256 #endif
11257 #ifdef TARGET_NR_fstat64
11258     case TARGET_NR_fstat64:
11259         ret = get_errno(fstat(arg1, &st));
11260         if (!is_error(ret))
11261             ret = host_to_target_stat64(cpu_env, arg2, &st);
11262         break;
11263 #endif
11264 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11265 #ifdef TARGET_NR_fstatat64
11266     case TARGET_NR_fstatat64:
11267 #endif
11268 #ifdef TARGET_NR_newfstatat
11269     case TARGET_NR_newfstatat:
11270 #endif
11271         if (!(p = lock_user_string(arg2)))
11272             goto efault;
11273         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11274         if (!is_error(ret))
11275             ret = host_to_target_stat64(cpu_env, arg3, &st);
11276         break;
11277 #endif
11278 #ifdef TARGET_NR_lchown
11279     case TARGET_NR_lchown:
11280         if (!(p = lock_user_string(arg1)))
11281             goto efault;
11282         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11283         unlock_user(p, arg1, 0);
11284         break;
11285 #endif
11286 #ifdef TARGET_NR_getuid
11287     case TARGET_NR_getuid:
11288         ret = get_errno(high2lowuid(getuid()));
11289         break;
11290 #endif
11291 #ifdef TARGET_NR_getgid
11292     case TARGET_NR_getgid:
11293         ret = get_errno(high2lowgid(getgid()));
11294         break;
11295 #endif
11296 #ifdef TARGET_NR_geteuid
11297     case TARGET_NR_geteuid:
11298         ret = get_errno(high2lowuid(geteuid()));
11299         break;
11300 #endif
11301 #ifdef TARGET_NR_getegid
11302     case TARGET_NR_getegid:
11303         ret = get_errno(high2lowgid(getegid()));
11304         break;
11305 #endif
11306     case TARGET_NR_setreuid:
11307         ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11308         break;
11309     case TARGET_NR_setregid:
11310         ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11311         break;
11312     case TARGET_NR_getgroups:
11313         {
11314             int gidsetsize = arg1;
11315             target_id *target_grouplist;
11316             gid_t *grouplist;
11317             int i;
11318 
11319             grouplist = alloca(gidsetsize * sizeof(gid_t));
11320             ret = get_errno(getgroups(gidsetsize, grouplist));
11321             if (gidsetsize == 0)
11322                 break;
11323             if (!is_error(ret)) {
11324                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11325                 if (!target_grouplist)
11326                     goto efault;
11327                 for (i = 0; i < ret; i++)
11328                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11329                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11330             }
11331         }
11332         break;
11333     case TARGET_NR_setgroups:
11334         {
11335             int gidsetsize = arg1;
11336             target_id *target_grouplist;
11337             gid_t *grouplist = NULL;
11338             int i;
11339             if (gidsetsize) {
11340                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11341                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11342                 if (!target_grouplist) {
11343                     ret = -TARGET_EFAULT;
11344                     goto fail;
11345                 }
11346                 for (i = 0; i < gidsetsize; i++) {
11347                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11348                 }
11349                 unlock_user(target_grouplist, arg2, 0);
11350             }
11351             ret = get_errno(setgroups(gidsetsize, grouplist));
11352         }
11353         break;
11354     case TARGET_NR_fchown:
11355         ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11356         break;
11357 #if defined(TARGET_NR_fchownat)
11358     case TARGET_NR_fchownat:
11359         if (!(p = lock_user_string(arg2)))
11360             goto efault;
11361         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11362                                  low2highgid(arg4), arg5));
11363         unlock_user(p, arg2, 0);
11364         break;
11365 #endif
11366 #ifdef TARGET_NR_setresuid
11367     case TARGET_NR_setresuid:
11368         ret = get_errno(sys_setresuid(low2highuid(arg1),
11369                                       low2highuid(arg2),
11370                                       low2highuid(arg3)));
11371         break;
11372 #endif
11373 #ifdef TARGET_NR_getresuid
11374     case TARGET_NR_getresuid:
11375         {
11376             uid_t ruid, euid, suid;
11377             ret = get_errno(getresuid(&ruid, &euid, &suid));
11378             if (!is_error(ret)) {
11379                 if (put_user_id(high2lowuid(ruid), arg1)
11380                     || put_user_id(high2lowuid(euid), arg2)
11381                     || put_user_id(high2lowuid(suid), arg3))
11382                     goto efault;
11383             }
11384         }
11385         break;
11386 #endif
11387 #ifdef TARGET_NR_getresgid
11388     case TARGET_NR_setresgid:
11389         ret = get_errno(sys_setresgid(low2highgid(arg1),
11390                                       low2highgid(arg2),
11391                                       low2highgid(arg3)));
11392         break;
11393 #endif
11394 #ifdef TARGET_NR_getresgid
11395     case TARGET_NR_getresgid:
11396         {
11397             gid_t rgid, egid, sgid;
11398             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11399             if (!is_error(ret)) {
11400                 if (put_user_id(high2lowgid(rgid), arg1)
11401                     || put_user_id(high2lowgid(egid), arg2)
11402                     || put_user_id(high2lowgid(sgid), arg3))
11403                     goto efault;
11404             }
11405         }
11406         break;
11407 #endif
11408 #ifdef TARGET_NR_chown
11409     case TARGET_NR_chown:
11410         if (!(p = lock_user_string(arg1)))
11411             goto efault;
11412         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11413         unlock_user(p, arg1, 0);
11414         break;
11415 #endif
11416     case TARGET_NR_setuid:
11417         ret = get_errno(sys_setuid(low2highuid(arg1)));
11418         break;
11419     case TARGET_NR_setgid:
11420         ret = get_errno(sys_setgid(low2highgid(arg1)));
11421         break;
11422     case TARGET_NR_setfsuid:
11423         ret = get_errno(setfsuid(arg1));
11424         break;
11425     case TARGET_NR_setfsgid:
11426         ret = get_errno(setfsgid(arg1));
11427         break;
11428 
11429 #ifdef TARGET_NR_lchown32
11430     case TARGET_NR_lchown32:
11431         if (!(p = lock_user_string(arg1)))
11432             goto efault;
11433         ret = get_errno(lchown(p, arg2, arg3));
11434         unlock_user(p, arg1, 0);
11435         break;
11436 #endif
11437 #ifdef TARGET_NR_getuid32
11438     case TARGET_NR_getuid32:
11439         ret = get_errno(getuid());
11440         break;
11441 #endif
11442 
11443 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11444     /* Alpha specific */
11445     case TARGET_NR_getxuid:
11446         {
11447             uid_t euid;
11448             euid = geteuid();
11449             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11450         }
11451         ret = get_errno(getuid());
11452         break;
11453 #endif
11454 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11455     /* Alpha specific */
11456     case TARGET_NR_getxgid:
11457         {
11458             gid_t egid;
11459             egid = getegid();
11460             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11461         }
11462         ret = get_errno(getgid());
11463         break;
11464 #endif
11465 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11466     /* Alpha specific */
11467     case TARGET_NR_osf_getsysinfo:
11468         ret = -TARGET_EOPNOTSUPP;
11469         switch (arg1) {
11470           case TARGET_GSI_IEEE_FP_CONTROL:
11471             {
11472                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11473 
11474                 /* Copied from linux ieee_fpcr_to_swcr.  */
11475                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11476                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11477                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11478                                         | SWCR_TRAP_ENABLE_DZE
11479                                         | SWCR_TRAP_ENABLE_OVF);
11480                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11481                                         | SWCR_TRAP_ENABLE_INE);
11482                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11483                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11484 
11485                 if (put_user_u64 (swcr, arg2))
11486                         goto efault;
11487                 ret = 0;
11488             }
11489             break;
11490 
11491           /* case GSI_IEEE_STATE_AT_SIGNAL:
11492              -- Not implemented in linux kernel.
11493              case GSI_UACPROC:
11494              -- Retrieves current unaligned access state; not much used.
11495              case GSI_PROC_TYPE:
11496              -- Retrieves implver information; surely not used.
11497              case GSI_GET_HWRPB:
11498              -- Grabs a copy of the HWRPB; surely not used.
11499           */
11500         }
11501         break;
11502 #endif
11503 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11504     /* Alpha specific */
11505     case TARGET_NR_osf_setsysinfo:
11506         ret = -TARGET_EOPNOTSUPP;
11507         switch (arg1) {
11508           case TARGET_SSI_IEEE_FP_CONTROL:
11509             {
11510                 uint64_t swcr, fpcr, orig_fpcr;
11511 
11512                 if (get_user_u64 (swcr, arg2)) {
11513                     goto efault;
11514                 }
11515                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11516                 fpcr = orig_fpcr & FPCR_DYN_MASK;
11517 
11518                 /* Copied from linux ieee_swcr_to_fpcr.  */
11519                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11520                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11521                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11522                                   | SWCR_TRAP_ENABLE_DZE
11523                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
11524                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11525                                   | SWCR_TRAP_ENABLE_INE)) << 57;
11526                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11527                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11528 
11529                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11530                 ret = 0;
11531             }
11532             break;
11533 
11534           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11535             {
11536                 uint64_t exc, fpcr, orig_fpcr;
11537                 int si_code;
11538 
11539                 if (get_user_u64(exc, arg2)) {
11540                     goto efault;
11541                 }
11542 
11543                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11544 
11545                 /* We only add to the exception status here.  */
11546                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11547 
11548                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11549                 ret = 0;
11550 
11551                 /* Old exceptions are not signaled.  */
11552                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11553 
11554                 /* If any exceptions were set by this call
11555                    and are unmasked, send a signal.  */
11556                 si_code = 0;
11557                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11558                     si_code = TARGET_FPE_FLTRES;
11559                 }
11560                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11561                     si_code = TARGET_FPE_FLTUND;
11562                 }
11563                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11564                     si_code = TARGET_FPE_FLTOVF;
11565                 }
11566                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11567                     si_code = TARGET_FPE_FLTDIV;
11568                 }
11569                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11570                     si_code = TARGET_FPE_FLTINV;
11571                 }
11572                 if (si_code != 0) {
11573                     target_siginfo_t info;
11574                     info.si_signo = SIGFPE;
11575                     info.si_errno = 0;
11576                     info.si_code = si_code;
11577                     info._sifields._sigfault._addr
11578                         = ((CPUArchState *)cpu_env)->pc;
11579                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11580                                  QEMU_SI_FAULT, &info);
11581                 }
11582             }
11583             break;
11584 
11585           /* case SSI_NVPAIRS:
11586              -- Used with SSIN_UACPROC to enable unaligned accesses.
11587              case SSI_IEEE_STATE_AT_SIGNAL:
11588              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11589              -- Not implemented in linux kernel
11590           */
11591         }
11592         break;
11593 #endif
11594 #ifdef TARGET_NR_osf_sigprocmask
11595     /* Alpha specific.  */
11596     case TARGET_NR_osf_sigprocmask:
11597         {
11598             abi_ulong mask;
11599             int how;
11600             sigset_t set, oldset;
11601 
11602             switch(arg1) {
11603             case TARGET_SIG_BLOCK:
11604                 how = SIG_BLOCK;
11605                 break;
11606             case TARGET_SIG_UNBLOCK:
11607                 how = SIG_UNBLOCK;
11608                 break;
11609             case TARGET_SIG_SETMASK:
11610                 how = SIG_SETMASK;
11611                 break;
11612             default:
11613                 ret = -TARGET_EINVAL;
11614                 goto fail;
11615             }
11616             mask = arg2;
11617             target_to_host_old_sigset(&set, &mask);
11618             ret = do_sigprocmask(how, &set, &oldset);
11619             if (!ret) {
11620                 host_to_target_old_sigset(&mask, &oldset);
11621                 ret = mask;
11622             }
11623         }
11624         break;
11625 #endif
11626 
11627 #ifdef TARGET_NR_getgid32
11628     case TARGET_NR_getgid32:
11629         ret = get_errno(getgid());
11630         break;
11631 #endif
11632 #ifdef TARGET_NR_geteuid32
11633     case TARGET_NR_geteuid32:
11634         ret = get_errno(geteuid());
11635         break;
11636 #endif
11637 #ifdef TARGET_NR_getegid32
11638     case TARGET_NR_getegid32:
11639         ret = get_errno(getegid());
11640         break;
11641 #endif
11642 #ifdef TARGET_NR_setreuid32
11643     case TARGET_NR_setreuid32:
11644         ret = get_errno(setreuid(arg1, arg2));
11645         break;
11646 #endif
11647 #ifdef TARGET_NR_setregid32
11648     case TARGET_NR_setregid32:
11649         ret = get_errno(setregid(arg1, arg2));
11650         break;
11651 #endif
11652 #ifdef TARGET_NR_getgroups32
11653     case TARGET_NR_getgroups32:
11654         {
11655             int gidsetsize = arg1;
11656             uint32_t *target_grouplist;
11657             gid_t *grouplist;
11658             int i;
11659 
11660             grouplist = alloca(gidsetsize * sizeof(gid_t));
11661             ret = get_errno(getgroups(gidsetsize, grouplist));
11662             if (gidsetsize == 0)
11663                 break;
11664             if (!is_error(ret)) {
11665                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11666                 if (!target_grouplist) {
11667                     ret = -TARGET_EFAULT;
11668                     goto fail;
11669                 }
11670                 for (i = 0; i < ret; i++)
11671                     target_grouplist[i] = tswap32(grouplist[i]);
11672                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11673             }
11674         }
11675         break;
11676 #endif
11677 #ifdef TARGET_NR_setgroups32
11678     case TARGET_NR_setgroups32:
11679         {
11680             int gidsetsize = arg1;
11681             uint32_t *target_grouplist;
11682             gid_t *grouplist;
11683             int i;
11684 
11685             grouplist = alloca(gidsetsize * sizeof(gid_t));
11686             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11687             if (!target_grouplist) {
11688                 ret = -TARGET_EFAULT;
11689                 goto fail;
11690             }
11691             for (i = 0; i < gidsetsize; i++)
11692                 grouplist[i] = tswap32(target_grouplist[i]);
11693             unlock_user(target_grouplist, arg2, 0);
11694             ret = get_errno(setgroups(gidsetsize, grouplist));
11695         }
11696         break;
11697 #endif
11698 #ifdef TARGET_NR_fchown32
11699     case TARGET_NR_fchown32:
11700         ret = get_errno(fchown(arg1, arg2, arg3));
11701         break;
11702 #endif
11703 #ifdef TARGET_NR_setresuid32
11704     case TARGET_NR_setresuid32:
11705         ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11706         break;
11707 #endif
11708 #ifdef TARGET_NR_getresuid32
11709     case TARGET_NR_getresuid32:
11710         {
11711             uid_t ruid, euid, suid;
11712             ret = get_errno(getresuid(&ruid, &euid, &suid));
11713             if (!is_error(ret)) {
11714                 if (put_user_u32(ruid, arg1)
11715                     || put_user_u32(euid, arg2)
11716                     || put_user_u32(suid, arg3))
11717                     goto efault;
11718             }
11719         }
11720         break;
11721 #endif
11722 #ifdef TARGET_NR_setresgid32
11723     case TARGET_NR_setresgid32:
11724         ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11725         break;
11726 #endif
11727 #ifdef TARGET_NR_getresgid32
11728     case TARGET_NR_getresgid32:
11729         {
11730             gid_t rgid, egid, sgid;
11731             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11732             if (!is_error(ret)) {
11733                 if (put_user_u32(rgid, arg1)
11734                     || put_user_u32(egid, arg2)
11735                     || put_user_u32(sgid, arg3))
11736                     goto efault;
11737             }
11738         }
11739         break;
11740 #endif
11741 #ifdef TARGET_NR_chown32
11742     case TARGET_NR_chown32:
11743         if (!(p = lock_user_string(arg1)))
11744             goto efault;
11745         ret = get_errno(chown(p, arg2, arg3));
11746         unlock_user(p, arg1, 0);
11747         break;
11748 #endif
11749 #ifdef TARGET_NR_setuid32
11750     case TARGET_NR_setuid32:
11751         ret = get_errno(sys_setuid(arg1));
11752         break;
11753 #endif
11754 #ifdef TARGET_NR_setgid32
11755     case TARGET_NR_setgid32:
11756         ret = get_errno(sys_setgid(arg1));
11757         break;
11758 #endif
11759 #ifdef TARGET_NR_setfsuid32
11760     case TARGET_NR_setfsuid32:
11761         ret = get_errno(setfsuid(arg1));
11762         break;
11763 #endif
11764 #ifdef TARGET_NR_setfsgid32
11765     case TARGET_NR_setfsgid32:
11766         ret = get_errno(setfsgid(arg1));
11767         break;
11768 #endif
11769 
11770     case TARGET_NR_pivot_root:
11771         goto unimplemented;
11772 #ifdef TARGET_NR_mincore
11773     case TARGET_NR_mincore:
11774         {
11775             void *a;
11776             ret = -TARGET_ENOMEM;
11777             a = lock_user(VERIFY_READ, arg1, arg2, 0);
11778             if (!a) {
11779                 goto fail;
11780             }
11781             ret = -TARGET_EFAULT;
11782             p = lock_user_string(arg3);
11783             if (!p) {
11784                 goto mincore_fail;
11785             }
11786             ret = get_errno(mincore(a, arg2, p));
11787             unlock_user(p, arg3, ret);
11788             mincore_fail:
11789             unlock_user(a, arg1, 0);
11790         }
11791         break;
11792 #endif
11793 #ifdef TARGET_NR_arm_fadvise64_64
11794     case TARGET_NR_arm_fadvise64_64:
11795         /* arm_fadvise64_64 looks like fadvise64_64 but
11796          * with different argument order: fd, advice, offset, len
11797          * rather than the usual fd, offset, len, advice.
11798          * Note that offset and len are both 64-bit so appear as
11799          * pairs of 32-bit registers.
11800          */
11801         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11802                             target_offset64(arg5, arg6), arg2);
11803         ret = -host_to_target_errno(ret);
11804         break;
11805 #endif
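    /*
     * Worked illustration of the ARM argument order handled above: a guest
     * posix_fadvise(fd, 4096, 8192, POSIX_FADV_DONTNEED) is issued as
     * arm_fadvise64_64(fd, advice, offset, len), so the registers arrive
     * here roughly as
     *
     *     arg1 = fd, arg2 = POSIX_FADV_DONTNEED,
     *     arg3/arg4 = offset pair (4096), arg5/arg6 = len pair (8192)
     *
     * matching the target_offset64(arg3, arg4) and target_offset64(arg5,
     * arg6) calls in the case body.
     */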
11806 
11807 #if TARGET_ABI_BITS == 32
11808 
11809 #ifdef TARGET_NR_fadvise64_64
11810     case TARGET_NR_fadvise64_64:
11811 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11812         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11813         ret = arg2;
11814         arg2 = arg3;
11815         arg3 = arg4;
11816         arg4 = arg5;
11817         arg5 = arg6;
11818         arg6 = ret;
11819 #else
11820         /* 6 args: fd, offset (high, low), len (high, low), advice */
11821         if (regpairs_aligned(cpu_env, num)) {
11822             /* offset is in (3,4), len in (5,6) and advice in 7 */
11823             arg2 = arg3;
11824             arg3 = arg4;
11825             arg4 = arg5;
11826             arg5 = arg6;
11827             arg6 = arg7;
11828         }
11829 #endif
11830         ret = -host_to_target_errno(posix_fadvise(arg1,
11831                                                   target_offset64(arg2, arg3),
11832                                                   target_offset64(arg4, arg5),
11833                                                   arg6));
11834         break;
11835 #endif
11836 
11837 #ifdef TARGET_NR_fadvise64
11838     case TARGET_NR_fadvise64:
11839         /* 5 args: fd, offset (high, low), len, advice */
11840         if (regpairs_aligned(cpu_env, num)) {
11841             /* offset is in (3,4), len in 5 and advice in 6 */
11842             arg2 = arg3;
11843             arg3 = arg4;
11844             arg4 = arg5;
11845             arg5 = arg6;
11846         }
11847         ret = -host_to_target_errno(posix_fadvise(arg1,
11848                                                   target_offset64(arg2, arg3),
11849                                                   arg4, arg5));
11850         break;
11851 #endif
11852 
11853 #else /* not a 32-bit ABI */
11854 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11855 #ifdef TARGET_NR_fadvise64_64
11856     case TARGET_NR_fadvise64_64:
11857 #endif
11858 #ifdef TARGET_NR_fadvise64
11859     case TARGET_NR_fadvise64:
11860 #endif
11861 #ifdef TARGET_S390X
11862         switch (arg4) {
11863         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11864         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11865         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11866         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11867         default: break;
11868         }
11869 #endif
11870         ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11871         break;
11872 #endif
11873 #endif /* end of 64-bit ABI fadvise handling */
11874 
11875 #ifdef TARGET_NR_madvise
11876     case TARGET_NR_madvise:
11877         /* A straight passthrough may not be safe because qemu sometimes
11878            turns private file-backed mappings into anonymous mappings.
11879            This will break MADV_DONTNEED.
11880            This is a hint, so ignoring and returning success is ok.  */
11881         ret = get_errno(0);
11882         break;
11883 #endif
11884 #if TARGET_ABI_BITS == 32
11885     case TARGET_NR_fcntl64:
11886     {
11887         int cmd;
11888         struct flock64 fl;
11889         from_flock64_fn *copyfrom = copy_from_user_flock64;
11890         to_flock64_fn *copyto = copy_to_user_flock64;
11891 
11892 #ifdef TARGET_ARM
11893         if (!((CPUARMState *)cpu_env)->eabi) {
11894             copyfrom = copy_from_user_oabi_flock64;
11895             copyto = copy_to_user_oabi_flock64;
11896         }
11897 #endif
11898 
11899         cmd = target_to_host_fcntl_cmd(arg2);
11900         if (cmd == -TARGET_EINVAL) {
11901             ret = cmd;
11902             break;
11903         }
11904 
11905         switch(arg2) {
11906         case TARGET_F_GETLK64:
11907             ret = copyfrom(&fl, arg3);
11908             if (ret) {
11909                 break;
11910             }
11911             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11912             if (ret == 0) {
11913                 ret = copyto(arg3, &fl);
11914             }
11915             break;
11916 
11917         case TARGET_F_SETLK64:
11918         case TARGET_F_SETLKW64:
11919             ret = copyfrom(&fl, arg3);
11920             if (ret) {
11921                 break;
11922             }
11923             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11924             break;
11925         default:
11926             ret = do_fcntl(arg1, arg2, arg3);
11927             break;
11928         }
11929         break;
11930     }
11931 #endif
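    /*
     * Illustration (hypothetical guest code) for the fcntl64 locking path
     * above, as seen from a 32-bit guest:
     *
     *     struct flock64 fl = {
     *         .l_type   = F_WRLCK,
     *         .l_whence = SEEK_SET,
     *         .l_start  = 0,
     *         .l_len    = 0,          // whole file
     *     };
     *     fcntl(fd, F_SETLK64, &fl);
     *
     * The flock64 contents are converted by copy_from_user_flock64() (or
     * the OABI variant on ARM) before the host fcntl runs.
     */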
11932 #ifdef TARGET_NR_cacheflush
11933     case TARGET_NR_cacheflush:
11934         /* self-modifying code is handled automatically, so nothing needed */
11935         ret = 0;
11936         break;
11937 #endif
11938 #ifdef TARGET_NR_security
11939     case TARGET_NR_security:
11940         goto unimplemented;
11941 #endif
11942 #ifdef TARGET_NR_getpagesize
11943     case TARGET_NR_getpagesize:
11944         ret = TARGET_PAGE_SIZE;
11945         break;
11946 #endif
11947     case TARGET_NR_gettid:
11948         ret = get_errno(gettid());
11949         break;
11950 #ifdef TARGET_NR_readahead
11951     case TARGET_NR_readahead:
11952 #if TARGET_ABI_BITS == 32
11953         if (regpairs_aligned(cpu_env, num)) {
11954             arg2 = arg3;
11955             arg3 = arg4;
11956             arg4 = arg5;
11957         }
11958         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11959 #else
11960         ret = get_errno(readahead(arg1, arg2, arg3));
11961 #endif
11962         break;
11963 #endif
11964 #ifdef CONFIG_ATTR
11965 #ifdef TARGET_NR_setxattr
11966     case TARGET_NR_listxattr:
11967     case TARGET_NR_llistxattr:
11968     {
11969         void *p, *b = 0;
11970         if (arg2) {
11971             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11972             if (!b) {
11973                 ret = -TARGET_EFAULT;
11974                 break;
11975             }
11976         }
11977         p = lock_user_string(arg1);
11978         if (p) {
11979             if (num == TARGET_NR_listxattr) {
11980                 ret = get_errno(listxattr(p, b, arg3));
11981             } else {
11982                 ret = get_errno(llistxattr(p, b, arg3));
11983             }
11984         } else {
11985             ret = -TARGET_EFAULT;
11986         }
11987         unlock_user(p, arg1, 0);
11988         unlock_user(b, arg2, arg3);
11989         break;
11990     }
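    /*
     * Illustration (hypothetical guest code): the arg2 == 0 path above
     * exists because listxattr() is commonly called twice, first with a
     * NULL buffer to learn the required size:
     *
     *     ssize_t len = listxattr(path, NULL, 0);
     *     char *buf = malloc(len);
     *     listxattr(path, buf, len);
     *
     * On the sizing call b stays NULL here and only the length is returned.
     */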
11991     case TARGET_NR_flistxattr:
11992     {
11993         void *b = 0;
11994         if (arg2) {
11995             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11996             if (!b) {
11997                 ret = -TARGET_EFAULT;
11998                 break;
11999             }
12000         }
12001         ret = get_errno(flistxattr(arg1, b, arg3));
12002         unlock_user(b, arg2, arg3);
12003         break;
12004     }
12005     case TARGET_NR_setxattr:
12006     case TARGET_NR_lsetxattr:
12007         {
12008             void *p, *n, *v = 0;
12009             if (arg3) {
12010                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12011                 if (!v) {
12012                     ret = -TARGET_EFAULT;
12013                     break;
12014                 }
12015             }
12016             p = lock_user_string(arg1);
12017             n = lock_user_string(arg2);
12018             if (p && n) {
12019                 if (num == TARGET_NR_setxattr) {
12020                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12021                 } else {
12022                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12023                 }
12024             } else {
12025                 ret = -TARGET_EFAULT;
12026             }
12027             unlock_user(p, arg1, 0);
12028             unlock_user(n, arg2, 0);
12029             unlock_user(v, arg3, 0);
12030         }
12031         break;
12032     case TARGET_NR_fsetxattr:
12033         {
12034             void *n, *v = 0;
12035             if (arg3) {
12036                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12037                 if (!v) {
12038                     ret = -TARGET_EFAULT;
12039                     break;
12040                 }
12041             }
12042             n = lock_user_string(arg2);
12043             if (n) {
12044                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12045             } else {
12046                 ret = -TARGET_EFAULT;
12047             }
12048             unlock_user(n, arg2, 0);
12049             unlock_user(v, arg3, 0);
12050         }
12051         break;
12052     case TARGET_NR_getxattr:
12053     case TARGET_NR_lgetxattr:
12054         {
12055             void *p, *n, *v = 0;
12056             if (arg3) {
12057                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12058                 if (!v) {
12059                     ret = -TARGET_EFAULT;
12060                     break;
12061                 }
12062             }
12063             p = lock_user_string(arg1);
12064             n = lock_user_string(arg2);
12065             if (p && n) {
12066                 if (num == TARGET_NR_getxattr) {
12067                     ret = get_errno(getxattr(p, n, v, arg4));
12068                 } else {
12069                     ret = get_errno(lgetxattr(p, n, v, arg4));
12070                 }
12071             } else {
12072                 ret = -TARGET_EFAULT;
12073             }
12074             unlock_user(p, arg1, 0);
12075             unlock_user(n, arg2, 0);
12076             unlock_user(v, arg3, arg4);
12077         }
12078         break;
12079     case TARGET_NR_fgetxattr:
12080         {
12081             void *n, *v = 0;
12082             if (arg3) {
12083                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12084                 if (!v) {
12085                     ret = -TARGET_EFAULT;
12086                     break;
12087                 }
12088             }
12089             n = lock_user_string(arg2);
12090             if (n) {
12091                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12092             } else {
12093                 ret = -TARGET_EFAULT;
12094             }
12095             unlock_user(n, arg2, 0);
12096             unlock_user(v, arg3, arg4);
12097         }
12098         break;
12099     case TARGET_NR_removexattr:
12100     case TARGET_NR_lremovexattr:
12101         {
12102             void *p, *n;
12103             p = lock_user_string(arg1);
12104             n = lock_user_string(arg2);
12105             if (p && n) {
12106                 if (num == TARGET_NR_removexattr) {
12107                     ret = get_errno(removexattr(p, n));
12108                 } else {
12109                     ret = get_errno(lremovexattr(p, n));
12110                 }
12111             } else {
12112                 ret = -TARGET_EFAULT;
12113             }
12114             unlock_user(p, arg1, 0);
12115             unlock_user(n, arg2, 0);
12116         }
12117         break;
12118     case TARGET_NR_fremovexattr:
12119         {
12120             void *n;
12121             n = lock_user_string(arg2);
12122             if (n) {
12123                 ret = get_errno(fremovexattr(arg1, n));
12124             } else {
12125                 ret = -TARGET_EFAULT;
12126             }
12127             unlock_user(n, arg2, 0);
12128         }
12129         break;
12130 #endif
12131 #endif /* CONFIG_ATTR */
12132 #ifdef TARGET_NR_set_thread_area
12133     case TARGET_NR_set_thread_area:
12134 #if defined(TARGET_MIPS)
12135       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12136       ret = 0;
12137       break;
12138 #elif defined(TARGET_CRIS)
12139       if (arg1 & 0xff)
12140           ret = -TARGET_EINVAL;
12141       else {
12142           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12143           ret = 0;
12144       }
12145       break;
12146 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12147       ret = do_set_thread_area(cpu_env, arg1);
12148       break;
12149 #elif defined(TARGET_M68K)
12150       {
12151           TaskState *ts = cpu->opaque;
12152           ts->tp_value = arg1;
12153           ret = 0;
12154           break;
12155       }
12156 #else
12157       goto unimplemented_nowarn;
12158 #endif
12159 #endif
12160 #ifdef TARGET_NR_get_thread_area
12161     case TARGET_NR_get_thread_area:
12162 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12163         ret = do_get_thread_area(cpu_env, arg1);
12164         break;
12165 #elif defined(TARGET_M68K)
12166         {
12167             TaskState *ts = cpu->opaque;
12168             ret = ts->tp_value;
12169             break;
12170         }
12171 #else
12172         goto unimplemented_nowarn;
12173 #endif
12174 #endif
12175 #ifdef TARGET_NR_getdomainname
12176     case TARGET_NR_getdomainname:
12177         goto unimplemented_nowarn;
12178 #endif
12179 
12180 #ifdef TARGET_NR_clock_settime
12181     case TARGET_NR_clock_settime:
12182     {
12183         struct timespec ts;
12184 
12185         ret = target_to_host_timespec(&ts, arg2);
12186         if (!is_error(ret)) {
12187             ret = get_errno(clock_settime(arg1, &ts));
12188         }
12189         break;
12190     }
12191 #endif
12192 #ifdef TARGET_NR_clock_gettime
12193     case TARGET_NR_clock_gettime:
12194     {
12195         struct timespec ts;
12196         ret = get_errno(clock_gettime(arg1, &ts));
12197         if (!is_error(ret)) {
12198             ret = host_to_target_timespec(arg2, &ts);
12199         }
12200         break;
12201     }
12202 #endif
12203 #ifdef TARGET_NR_clock_getres
12204     case TARGET_NR_clock_getres:
12205     {
12206         struct timespec ts;
12207         ret = get_errno(clock_getres(arg1, &ts));
12208         if (!is_error(ret)) {
12209             host_to_target_timespec(arg2, &ts);
12210         }
12211         break;
12212     }
12213 #endif
12214 #ifdef TARGET_NR_clock_nanosleep
12215     case TARGET_NR_clock_nanosleep:
12216     {
12217         struct timespec ts;
12218         target_to_host_timespec(&ts, arg3);
12219         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12220                                              &ts, arg4 ? &ts : NULL));
12221         if (arg4)
12222             host_to_target_timespec(arg4, &ts);
12223 
12224 #if defined(TARGET_PPC)
12225         /* clock_nanosleep is odd in that it returns positive errno values.
12226          * On PPC, CR0 bit 3 should be set in such a situation. */
12227         if (ret && ret != -TARGET_ERESTARTSYS) {
12228             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
12229         }
12230 #endif
12231         break;
12232     }
12233 #endif
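    /*
     * Illustration (hypothetical guest code) of an absolute-time sleep that
     * goes through the clock_nanosleep case above:
     *
     *     struct timespec deadline;
     *     clock_gettime(CLOCK_MONOTONIC, &deadline);
     *     deadline.tv_sec += 1;
     *     clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
     *
     * With TIMER_ABSTIME the remaining-time argument is unused, so arg4 is
     * typically zero and nothing is copied back.
     */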
12234 
12235 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12236     case TARGET_NR_set_tid_address:
12237         ret = get_errno(set_tid_address((int *)g2h(arg1)));
12238         break;
12239 #endif
12240 
12241     case TARGET_NR_tkill:
12242         ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12243         break;
12244 
12245     case TARGET_NR_tgkill:
12246         ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
12247                         target_to_host_signal(arg3)));
12248         break;
12249 
12250 #ifdef TARGET_NR_set_robust_list
12251     case TARGET_NR_set_robust_list:
12252     case TARGET_NR_get_robust_list:
12253         /* The ABI for supporting robust futexes has userspace pass
12254          * the kernel a pointer to a linked list which is updated by
12255          * userspace after the syscall; the list is walked by the kernel
12256          * when the thread exits. Since the linked list in QEMU guest
12257          * memory isn't a valid linked list for the host and we have
12258          * no way to reliably intercept the thread-death event, we can't
12259          * support these. Silently return ENOSYS so that guest userspace
12260          * falls back to a non-robust futex implementation (which should
12261          * be OK except in the corner case of the guest crashing while
12262          * holding a mutex that is shared with another process via
12263          * shared memory).
12264          */
12265         goto unimplemented_nowarn;
12266 #endif
12267 
12268 #if defined(TARGET_NR_utimensat)
12269     case TARGET_NR_utimensat:
12270         {
12271             struct timespec *tsp, ts[2];
12272             if (!arg3) {
12273                 tsp = NULL;
12274             } else {
12275                 target_to_host_timespec(ts, arg3);
12276                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12277                 tsp = ts;
12278             }
12279             if (!arg2)
12280                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12281             else {
12282                 if (!(p = lock_user_string(arg2))) {
12283                     ret = -TARGET_EFAULT;
12284                     goto fail;
12285                 }
12286                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12287                 unlock_user(p, arg2, 0);
12288             }
12289         }
12290         break;
12291 #endif
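    /*
     * Illustration (hypothetical guest code) for the utimensat case above:
     *
     *     struct timespec times[2] = {
     *         { .tv_sec = 0, .tv_nsec = UTIME_NOW  },   // atime := now
     *         { .tv_sec = 0, .tv_nsec = UTIME_OMIT },   // leave mtime alone
     *     };
     *     utimensat(AT_FDCWD, "somefile", times, 0);
     *
     * Both timespecs are converted with target_to_host_timespec(); a NULL
     * times pointer (arg3 == 0) means "set both timestamps to now".
     */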
12292     case TARGET_NR_futex:
12293         ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12294         break;
12295 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12296     case TARGET_NR_inotify_init:
12297         ret = get_errno(sys_inotify_init());
12298         if (ret >= 0) {
12299             fd_trans_register(ret, &target_inotify_trans);
12300         }
12301         break;
12302 #endif
12303 #ifdef CONFIG_INOTIFY1
12304 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12305     case TARGET_NR_inotify_init1:
12306         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12307                                           fcntl_flags_tbl)));
12308         if (ret >= 0) {
12309             fd_trans_register(ret, &target_inotify_trans);
12310         }
12311         break;
12312 #endif
12313 #endif
12314 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12315     case TARGET_NR_inotify_add_watch:
12316         p = lock_user_string(arg2);
12317         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12318         unlock_user(p, arg2, 0);
12319         break;
12320 #endif
12321 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12322     case TARGET_NR_inotify_rm_watch:
12323         ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12324         break;
12325 #endif
12326 
12327 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12328     case TARGET_NR_mq_open:
12329         {
12330             struct mq_attr posix_mq_attr;
12331             struct mq_attr *pposix_mq_attr;
12332             int host_flags;
12333 
12334             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12335             pposix_mq_attr = NULL;
12336             if (arg4) {
12337                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12338                     goto efault;
12339                 }
12340                 pposix_mq_attr = &posix_mq_attr;
12341             }
12342             p = lock_user_string(arg1 - 1);
12343             if (!p) {
12344                 goto efault;
12345             }
12346             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12347             unlock_user(p, arg1, 0);
12348         }
12349         break;
12350 
12351     case TARGET_NR_mq_unlink:
12352         p = lock_user_string(arg1 - 1);
12353         if (!p) {
12354             ret = -TARGET_EFAULT;
12355             break;
12356         }
12357         ret = get_errno(mq_unlink(p));
12358         unlock_user(p, arg1, 0);
12359         break;
12360 
12361     case TARGET_NR_mq_timedsend:
12362         {
12363             struct timespec ts;
12364 
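            /* arg5 is an optional absolute timeout; convert it to a host
             * timespec only when it is present, otherwise pass NULL.
             */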
12365             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12366             if (arg5 != 0) {
12367                 target_to_host_timespec(&ts, arg5);
12368                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12369                 host_to_target_timespec(arg5, &ts);
12370             } else {
12371                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12372             }
12373             unlock_user(p, arg2, arg3);
12374         }
12375         break;
12376 
12377     case TARGET_NR_mq_timedreceive:
12378         {
12379             struct timespec ts;
12380             unsigned int prio;
12381 
12382             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
12383             if (arg5 != 0) {
12384                 target_to_host_timespec(&ts, arg5);
12385                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12386                                                      &prio, &ts));
12387                 host_to_target_timespec(arg5, &ts);
12388             } else {
12389                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12390                                                      &prio, NULL));
12391             }
12392             unlock_user(p, arg2, arg3);
12393             if (arg4 != 0)
12394                 put_user_u32(prio, arg4);
12395         }
12396         break;
12397 
12398     /* Not implemented for now... */
12399 /*     case TARGET_NR_mq_notify: */
12400 /*         break; */
12401 
12402     case TARGET_NR_mq_getsetattr:
12403         {
12404             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
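            /* mq_setattr() also reports the previous attributes, so when a
             * new attr block is supplied (arg2) the old state is returned
             * in posix_mq_attr_out; otherwise fall back to mq_getattr().
             */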
12405             ret = 0;
12406             if (arg2 != 0) {
12407                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12408                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12409                                            &posix_mq_attr_out));
12410             } else if (arg3 != 0) {
12411                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12412             }
12413             if (ret == 0 && arg3 != 0) {
12414                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12415             }
12416         }
12417         break;
12418 #endif
12419 
12420 #ifdef CONFIG_SPLICE
12421 #ifdef TARGET_NR_tee
12422     case TARGET_NR_tee:
12423         {
12424             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12425         }
12426         break;
12427 #endif
12428 #ifdef TARGET_NR_splice
12429     case TARGET_NR_splice:
12430         {
12431             loff_t loff_in, loff_out;
12432             loff_t *ploff_in = NULL, *ploff_out = NULL;
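            /* The in/out offsets are optional 64-bit values that the host
             * call updates in place; read them with get_user_u64 and copy
             * any result back to the guest afterwards.
             */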
12433             if (arg2) {
12434                 if (get_user_u64(loff_in, arg2)) {
12435                     goto efault;
12436                 }
12437                 ploff_in = &loff_in;
12438             }
12439             if (arg4) {
12440                 if (get_user_u64(loff_out, arg4)) {
12441                     goto efault;
12442                 }
12443                 ploff_out = &loff_out;
12444             }
12445             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12446             if (arg2) {
12447                 if (put_user_u64(loff_in, arg2)) {
12448                     goto efault;
12449                 }
12450             }
12451             if (arg4) {
12452                 if (put_user_u64(loff_out, arg4)) {
12453                     goto efault;
12454                 }
12455             }
12456         }
12457         break;
12458 #endif
12459 #ifdef TARGET_NR_vmsplice
12460     case TARGET_NR_vmsplice:
12461         {
12462             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12463             if (vec != NULL) {
12464                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12465                 unlock_iovec(vec, arg2, arg3, 0);
12466             } else {
12467                 ret = -host_to_target_errno(errno);
12468             }
12469         }
12470         break;
12471 #endif
12472 #endif /* CONFIG_SPLICE */
12473 #ifdef CONFIG_EVENTFD
12474 #if defined(TARGET_NR_eventfd)
12475     case TARGET_NR_eventfd:
12476         ret = get_errno(eventfd(arg1, 0));
12477         if (ret >= 0) {
12478             fd_trans_register(ret, &target_eventfd_trans);
12479         }
12480         break;
12481 #endif
12482 #if defined(TARGET_NR_eventfd2)
12483     case TARGET_NR_eventfd2:
12484     {
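        /* EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK
         * and O_CLOEXEC, which differ between target and host, so just
         * those two bits are remapped; the rest of arg2 is passed through
         * unchanged.
         */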
12485         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12486         if (arg2 & TARGET_O_NONBLOCK) {
12487             host_flags |= O_NONBLOCK;
12488         }
12489         if (arg2 & TARGET_O_CLOEXEC) {
12490             host_flags |= O_CLOEXEC;
12491         }
12492         ret = get_errno(eventfd(arg1, host_flags));
12493         if (ret >= 0) {
12494             fd_trans_register(ret, &target_eventfd_trans);
12495         }
12496         break;
12497     }
12498 #endif
12499 #endif /* CONFIG_EVENTFD  */
12500 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12501     case TARGET_NR_fallocate:
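        /* On 32-bit ABIs the 64-bit offset and length are each split across
         * two syscall arguments and reassembled with target_offset64().
         */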
12502 #if TARGET_ABI_BITS == 32
12503         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12504                                   target_offset64(arg5, arg6)));
12505 #else
12506         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12507 #endif
12508         break;
12509 #endif
12510 #if defined(CONFIG_SYNC_FILE_RANGE)
12511 #if defined(TARGET_NR_sync_file_range)
12512     case TARGET_NR_sync_file_range:
12513 #if TARGET_ABI_BITS == 32
12514 #if defined(TARGET_MIPS)
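        /* The 32-bit MIPS ABI aligns 64-bit arguments to even register
         * pairs, inserting a pad after the fd, so offset and nbytes start
         * one argument later than on other 32-bit targets.
         */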
12515         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12516                                         target_offset64(arg5, arg6), arg7));
12517 #else
12518         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12519                                         target_offset64(arg4, arg5), arg6));
12520 #endif /* !TARGET_MIPS */
12521 #else
12522         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12523 #endif
12524         break;
12525 #endif
12526 #if defined(TARGET_NR_sync_file_range2)
12527     case TARGET_NR_sync_file_range2:
12528         /* Like sync_file_range, but the flags (arg2) come before offset and nbytes */
12529 #if TARGET_ABI_BITS == 32
12530         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12531                                         target_offset64(arg5, arg6), arg2));
12532 #else
12533         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12534 #endif
12535         break;
12536 #endif
12537 #endif
12538 #if defined(TARGET_NR_signalfd4)
12539     case TARGET_NR_signalfd4:
12540         ret = do_signalfd4(arg1, arg2, arg4);
12541         break;
12542 #endif
12543 #if defined(TARGET_NR_signalfd)
12544     case TARGET_NR_signalfd:
12545         ret = do_signalfd4(arg1, arg2, 0);
12546         break;
12547 #endif
12548 #if defined(CONFIG_EPOLL)
12549 #if defined(TARGET_NR_epoll_create)
12550     case TARGET_NR_epoll_create:
12551         ret = get_errno(epoll_create(arg1));
12552         break;
12553 #endif
12554 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12555     case TARGET_NR_epoll_create1:
12556         ret = get_errno(epoll_create1(arg1));
12557         break;
12558 #endif
12559 #if defined(TARGET_NR_epoll_ctl)
12560     case TARGET_NR_epoll_ctl:
12561     {
12562         struct epoll_event ep;
12563         struct epoll_event *epp = NULL;
12564         if (arg4) {
12565             struct target_epoll_event *target_ep;
12566             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12567                 goto efault;
12568             }
12569             ep.events = tswap32(target_ep->events);
12570             /* The epoll_data_t union is just opaque data to the kernel,
12571              * so we transfer all 64 bits across and need not worry what
12572              * actual data type it is.
12573              */
12574             ep.data.u64 = tswap64(target_ep->data.u64);
12575             unlock_user_struct(target_ep, arg4, 0);
12576             epp = &ep;
12577         }
12578         ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12579         break;
12580     }
12581 #endif
12582 
12583 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12584 #if defined(TARGET_NR_epoll_wait)
12585     case TARGET_NR_epoll_wait:
12586 #endif
12587 #if defined(TARGET_NR_epoll_pwait)
12588     case TARGET_NR_epoll_pwait:
12589 #endif
12590     {
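        /* epoll_wait is handled as epoll_pwait with a NULL signal mask, so
         * both syscalls share this code and dispatch on 'num' below.
         */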
12591         struct target_epoll_event *target_ep;
12592         struct epoll_event *ep;
12593         int epfd = arg1;
12594         int maxevents = arg3;
12595         int timeout = arg4;
12596 
12597         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12598             ret = -TARGET_EINVAL;
12599             break;
12600         }
12601 
12602         target_ep = lock_user(VERIFY_WRITE, arg2,
12603                               maxevents * sizeof(struct target_epoll_event), 1);
12604         if (!target_ep) {
12605             goto efault;
12606         }
12607 
12608         ep = g_try_new(struct epoll_event, maxevents);
12609         if (!ep) {
12610             unlock_user(target_ep, arg2, 0);
12611             ret = -TARGET_ENOMEM;
12612             break;
12613         }
12614 
12615         switch (num) {
12616 #if defined(TARGET_NR_epoll_pwait)
12617         case TARGET_NR_epoll_pwait:
12618         {
12619             target_sigset_t *target_set;
12620             sigset_t _set, *set = &_set;
12621 
12622             if (arg5) {
12623                 if (arg6 != sizeof(target_sigset_t)) {
12624                     ret = -TARGET_EINVAL;
12625                     break;
12626                 }
12627 
12628                 target_set = lock_user(VERIFY_READ, arg5,
12629                                        sizeof(target_sigset_t), 1);
12630                 if (!target_set) {
12631                     ret = -TARGET_EFAULT;
12632                     break;
12633                 }
12634                 target_to_host_sigset(set, target_set);
12635                 unlock_user(target_set, arg5, 0);
12636             } else {
12637                 set = NULL;
12638             }
12639 
12640             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12641                                              set, SIGSET_T_SIZE));
12642             break;
12643         }
12644 #endif
12645 #if defined(TARGET_NR_epoll_wait)
12646         case TARGET_NR_epoll_wait:
12647             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12648                                              NULL, 0));
12649             break;
12650 #endif
12651         default:
12652             ret = -TARGET_ENOSYS;
12653         }
12654         if (!is_error(ret)) {
12655             int i;
12656             for (i = 0; i < ret; i++) {
12657                 target_ep[i].events = tswap32(ep[i].events);
12658                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12659             }
12660             unlock_user(target_ep, arg2,
12661                         ret * sizeof(struct target_epoll_event));
12662         } else {
12663             unlock_user(target_ep, arg2, 0);
12664         }
12665         g_free(ep);
12666         break;
12667     }
12668 #endif
12669 #endif
12670 #ifdef TARGET_NR_prlimit64
12671     case TARGET_NR_prlimit64:
12672     {
12673         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
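        /* struct rlimit64 uses fixed 64-bit fields on every target, so the
         * conversion only needs to byte-swap rlim_cur and rlim_max.
         */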
12674         struct target_rlimit64 *target_rnew, *target_rold;
12675         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12676         int resource = target_to_host_resource(arg2);
12677         if (arg3) {
12678             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12679                 goto efault;
12680             }
12681             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12682             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12683             unlock_user_struct(target_rnew, arg3, 0);
12684             rnewp = &rnew;
12685         }
12686 
12687         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12688         if (!is_error(ret) && arg4) {
12689             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12690                 goto efault;
12691             }
12692             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12693             target_rold->rlim_max = tswap64(rold.rlim_max);
12694             unlock_user_struct(target_rold, arg4, 1);
12695         }
12696         break;
12697     }
12698 #endif
12699 #ifdef TARGET_NR_gethostname
12700     case TARGET_NR_gethostname:
12701     {
12702         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12703         if (name) {
12704             ret = get_errno(gethostname(name, arg2));
12705             unlock_user(name, arg1, arg2);
12706         } else {
12707             ret = -TARGET_EFAULT;
12708         }
12709         break;
12710     }
12711 #endif
12712 #ifdef TARGET_NR_atomic_cmpxchg_32
12713     case TARGET_NR_atomic_cmpxchg_32:
12714     {
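        /* m68k-only syscall: emulate a 32-bit compare-and-exchange on guest
         * memory. This is not done atomically with respect to other guest
         * threads (hence the note below about start_exclusive).
         */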
12715         /* should use start_exclusive from main.c */
12716         abi_ulong mem_value;
12717         if (get_user_u32(mem_value, arg6)) {
12718             target_siginfo_t info;
12719             info.si_signo = SIGSEGV;
12720             info.si_errno = 0;
12721             info.si_code = TARGET_SEGV_MAPERR;
12722             info._sifields._sigfault._addr = arg6;
12723             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12724                          QEMU_SI_FAULT, &info);
12725             ret = 0xdeadbeef;
12726 
12727         }
12728         if (mem_value == arg2)
12729             put_user_u32(arg1, arg6);
12730         ret = mem_value;
12731         break;
12732     }
12733 #endif
12734 #ifdef TARGET_NR_atomic_barrier
12735     case TARGET_NR_atomic_barrier:
12736     {
12737         /* Like the kernel implementation and the QEMU ARM barrier, this is a no-op. */
12738         ret = 0;
12739         break;
12740     }
12741 #endif
12742 
12743 #ifdef TARGET_NR_timer_create
12744     case TARGET_NR_timer_create:
12745     {
12746         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12747 
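        /* Host timer_t handles live in g_posix_timers[]; the id handed back
         * to the guest is TIMER_MAGIC ORed with the slot index, which
         * get_timer_id() validates and decodes for the other timer_* calls.
         */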
12748         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12749 
12750         int clkid = arg1;
12751         int timer_index = next_free_host_timer();
12752 
12753         if (timer_index < 0) {
12754             ret = -TARGET_EAGAIN;
12755         } else {
12756             timer_t *phtimer = g_posix_timers + timer_index;
12757 
12758             if (arg2) {
12759                 phost_sevp = &host_sevp;
12760                 ret = target_to_host_sigevent(phost_sevp, arg2);
12761                 if (ret != 0) {
12762                     break;
12763                 }
12764             }
12765 
12766             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12767             if (ret) {
12768                 phtimer = NULL;
12769             } else {
12770                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12771                     goto efault;
12772                 }
12773             }
12774         }
12775         break;
12776     }
12777 #endif
12778 
12779 #ifdef TARGET_NR_timer_settime
12780     case TARGET_NR_timer_settime:
12781     {
12782         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12783          * struct itimerspec * old_value */
12784         target_timer_t timerid = get_timer_id(arg1);
12785 
12786         if (timerid < 0) {
12787             ret = timerid;
12788         } else if (arg3 == 0) {
12789             ret = -TARGET_EINVAL;
12790         } else {
12791             timer_t htimer = g_posix_timers[timerid];
12792             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12793 
12794             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12795                 goto efault;
12796             }
12797             ret = get_errno(
12798                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12799             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12800                 goto efault;
12801             }
12802         }
12803         break;
12804     }
12805 #endif
12806 
12807 #ifdef TARGET_NR_timer_gettime
12808     case TARGET_NR_timer_gettime:
12809     {
12810         /* args: timer_t timerid, struct itimerspec *curr_value */
12811         target_timer_t timerid = get_timer_id(arg1);
12812 
12813         if (timerid < 0) {
12814             ret = timerid;
12815         } else if (!arg2) {
12816             ret = -TARGET_EFAULT;
12817         } else {
12818             timer_t htimer = g_posix_timers[timerid];
12819             struct itimerspec hspec;
12820             ret = get_errno(timer_gettime(htimer, &hspec));
12821 
12822             if (host_to_target_itimerspec(arg2, &hspec)) {
12823                 ret = -TARGET_EFAULT;
12824             }
12825         }
12826         break;
12827     }
12828 #endif
12829 
12830 #ifdef TARGET_NR_timer_getoverrun
12831     case TARGET_NR_timer_getoverrun:
12832     {
12833         /* args: timer_t timerid */
12834         target_timer_t timerid = get_timer_id(arg1);
12835 
12836         if (timerid < 0) {
12837             ret = timerid;
12838         } else {
12839             timer_t htimer = g_posix_timers[timerid];
12840             ret = get_errno(timer_getoverrun(htimer));
12841         }
12842         fd_trans_unregister(ret);
12843         break;
12844     }
12845 #endif
12846 
12847 #ifdef TARGET_NR_timer_delete
12848     case TARGET_NR_timer_delete:
12849     {
12850         /* args: timer_t timerid */
12851         target_timer_t timerid = get_timer_id(arg1);
12852 
12853         if (timerid < 0) {
12854             ret = timerid;
12855         } else {
12856             timer_t htimer = g_posix_timers[timerid];
12857             ret = get_errno(timer_delete(htimer));
12858             g_posix_timers[timerid] = 0;
12859         }
12860         break;
12861     }
12862 #endif
12863 
12864 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12865     case TARGET_NR_timerfd_create:
12866         ret = get_errno(timerfd_create(arg1,
12867                 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12868         break;
12869 #endif
12870 
12871 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12872     case TARGET_NR_timerfd_gettime:
12873         {
12874             struct itimerspec its_curr;
12875 
12876             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12877 
12878             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12879                 goto efault;
12880             }
12881         }
12882         break;
12883 #endif
12884 
12885 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12886     case TARGET_NR_timerfd_settime:
12887         {
12888             struct itimerspec its_new, its_old, *p_new;
12889 
12890             if (arg3) {
12891                 if (target_to_host_itimerspec(&its_new, arg3)) {
12892                     goto efault;
12893                 }
12894                 p_new = &its_new;
12895             } else {
12896                 p_new = NULL;
12897             }
12898 
12899             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12900 
12901             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12902                 goto efault;
12903             }
12904         }
12905         break;
12906 #endif
12907 
12908 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12909     case TARGET_NR_ioprio_get:
12910         ret = get_errno(ioprio_get(arg1, arg2));
12911         break;
12912 #endif
12913 
12914 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12915     case TARGET_NR_ioprio_set:
12916         ret = get_errno(ioprio_set(arg1, arg2, arg3));
12917         break;
12918 #endif
12919 
12920 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12921     case TARGET_NR_setns:
12922         ret = get_errno(setns(arg1, arg2));
12923         break;
12924 #endif
12925 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12926     case TARGET_NR_unshare:
12927         ret = get_errno(unshare(arg1));
12928         break;
12929 #endif
12930 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12931     case TARGET_NR_kcmp:
12932         ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12933         break;
12934 #endif
12935 #ifdef TARGET_NR_swapcontext
12936     case TARGET_NR_swapcontext:
12937         /* PowerPC specific.  */
12938         ret = do_swapcontext(cpu_env, arg1, arg2, arg3);
12939         break;
12940 #endif
12941 
12942     default:
12943     unimplemented:
12944         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12945 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12946     unimplemented_nowarn:
12947 #endif
12948         ret = -TARGET_ENOSYS;
12949         break;
12950     }
12951 fail:
12952 #ifdef DEBUG
12953     gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12954 #endif
12955     if (do_strace)
12956         print_syscall_ret(num, ret);
12957     trace_guest_user_syscall_ret(cpu, num, ret);
12958     return ret;
12959 efault:
12960     ret = -TARGET_EFAULT;
12961     goto fail;
12962 }
12963