xref: /openbmc/qemu/linux-user/syscall.c (revision 025573be)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
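/* For illustration (assuming a glibc-based guest): NPTL's pthread_create()
 * passes roughly CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 * CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 * CLONE_CHILD_CLEARTID, i.e. all of CLONE_THREAD_FLAGS plus only bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so it is classified as a thread creation;
 * a plain fork()/vfork() passes none of CLONE_THREAD_FLAGS and therefore
 * takes the fork path.
 */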
165 
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167  * once. This exercises the codepaths for restart.
168  */
169 //#define DEBUG_ERESTARTSYS
170 
171 //#include <linux/msdos_fs.h>
172 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
173 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
174 
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
182 
183 #define _syscall0(type,name)		\
184 static type name (void)			\
185 {					\
186 	return syscall(__NR_##name);	\
187 }
188 
189 #define _syscall1(type,name,type1,arg1)		\
190 static type name (type1 arg1)			\
191 {						\
192 	return syscall(__NR_##name, arg1);	\
193 }
194 
195 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
196 static type name (type1 arg1,type2 arg2)		\
197 {							\
198 	return syscall(__NR_##name, arg1, arg2);	\
199 }
200 
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
202 static type name (type1 arg1,type2 arg2,type3 arg3)		\
203 {								\
204 	return syscall(__NR_##name, arg1, arg2, arg3);		\
205 }
206 
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
209 {										\
210 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
211 }
212 
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
214 		  type5,arg5)							\
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
216 {										\
217 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
218 }
219 
220 
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
222 		  type5,arg5,type6,arg6)					\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
224                   type6 arg6)							\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
227 }
228 
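/* For illustration, an invocation such as
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count)
 * expands to a thin static wrapper:
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 * The __NR_sys_* aliases defined just below pick which host syscall number
 * each such wrapper invokes.
 */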
229 
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
246 
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
251 
252 #ifdef __NR_gettid
253 _syscall0(int, gettid)
254 #else
255 /* This is a replacement for the host gettid() and must return a host
256    errno. */
257 static int gettid(void) {
258     return -ENOSYS;
259 }
260 #endif
261 
262 /* For the 64-bit guest on 32-bit host case we must emulate
263  * getdents using getdents64, because otherwise the host
264  * might hand us back more dirent records than we can fit
265  * into the guest buffer after structure format conversion.
266  * Otherwise we implement guest getdents using the host getdents if available.
267  */
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
270 #endif
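/* Concretely: a 64-bit guest (TARGET_ABI_BITS == 64) running on a 32-bit
 * host (HOST_LONG_BITS == 32) leaves this undefined and therefore goes
 * through the getdents64-based emulation below.
 */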
271 
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
274 #endif
275 #if (defined(TARGET_NR_getdents) && \
276       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
279 #endif
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
282           loff_t *, res, uint, wh);
283 #endif
284 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
285 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
286           siginfo_t *, uinfo)
287 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group,int,error_code)
290 #endif
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address,int *,tidptr)
293 #endif
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
296           const struct timespec *,timeout,int *,uaddr2,int,val3)
297 #endif
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
300           unsigned long *, user_mask_ptr);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
306 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
307           void *, arg);
308 _syscall2(int, capget, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 _syscall2(int, capset, struct __user_cap_header_struct *, header,
311           struct __user_cap_data_struct *, data);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get, int, which, int, who)
314 #endif
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
317 #endif
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
320 #endif
321 
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
324           unsigned long, idx1, unsigned long, idx2)
325 #endif
326 
327 static bitmask_transtbl fcntl_flags_tbl[] = {
328   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
329   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
330   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
331   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
332   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
333   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
334   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
335   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
336   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
337   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
338   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
339   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
340   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
341 #if defined(O_DIRECT)
342   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
343 #endif
344 #if defined(O_NOATIME)
345   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
346 #endif
347 #if defined(O_CLOEXEC)
348   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
349 #endif
350 #if defined(O_PATH)
351   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
352 #endif
353 #if defined(O_TMPFILE)
354   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
355 #endif
356   /* Don't terminate the list prematurely on 64-bit host+guest.  */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
359 #endif
360   { 0, 0, 0, 0 }
361 };
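/* Each entry above pairs a target (mask, value) with the corresponding host
 * (mask, value): the translation helpers (target_to_host_bitmask() and its
 * inverse) OR in an entry's host bits whenever (flags & target_mask) ==
 * target_value, and vice versa for the reverse direction. E.g. a guest
 * passing TARGET_O_RDWR | TARGET_O_CREAT to open() ends up with host
 * O_RDWR | O_CREAT after the table walk.
 */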
362 
363 enum {
364     QEMU_IFLA_BR_UNSPEC,
365     QEMU_IFLA_BR_FORWARD_DELAY,
366     QEMU_IFLA_BR_HELLO_TIME,
367     QEMU_IFLA_BR_MAX_AGE,
368     QEMU_IFLA_BR_AGEING_TIME,
369     QEMU_IFLA_BR_STP_STATE,
370     QEMU_IFLA_BR_PRIORITY,
371     QEMU_IFLA_BR_VLAN_FILTERING,
372     QEMU_IFLA_BR_VLAN_PROTOCOL,
373     QEMU_IFLA_BR_GROUP_FWD_MASK,
374     QEMU_IFLA_BR_ROOT_ID,
375     QEMU_IFLA_BR_BRIDGE_ID,
376     QEMU_IFLA_BR_ROOT_PORT,
377     QEMU_IFLA_BR_ROOT_PATH_COST,
378     QEMU_IFLA_BR_TOPOLOGY_CHANGE,
379     QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
380     QEMU_IFLA_BR_HELLO_TIMER,
381     QEMU_IFLA_BR_TCN_TIMER,
382     QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
383     QEMU_IFLA_BR_GC_TIMER,
384     QEMU_IFLA_BR_GROUP_ADDR,
385     QEMU_IFLA_BR_FDB_FLUSH,
386     QEMU_IFLA_BR_MCAST_ROUTER,
387     QEMU_IFLA_BR_MCAST_SNOOPING,
388     QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
389     QEMU_IFLA_BR_MCAST_QUERIER,
390     QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
391     QEMU_IFLA_BR_MCAST_HASH_MAX,
392     QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
393     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
394     QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
395     QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
396     QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
397     QEMU_IFLA_BR_MCAST_QUERY_INTVL,
398     QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
399     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
400     QEMU_IFLA_BR_NF_CALL_IPTABLES,
401     QEMU_IFLA_BR_NF_CALL_IP6TABLES,
402     QEMU_IFLA_BR_NF_CALL_ARPTABLES,
403     QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
404     QEMU_IFLA_BR_PAD,
405     QEMU_IFLA_BR_VLAN_STATS_ENABLED,
406     QEMU_IFLA_BR_MCAST_STATS_ENABLED,
407     QEMU_IFLA_BR_MCAST_IGMP_VERSION,
408     QEMU_IFLA_BR_MCAST_MLD_VERSION,
409     QEMU___IFLA_BR_MAX,
410 };
411 
412 enum {
413     QEMU_IFLA_UNSPEC,
414     QEMU_IFLA_ADDRESS,
415     QEMU_IFLA_BROADCAST,
416     QEMU_IFLA_IFNAME,
417     QEMU_IFLA_MTU,
418     QEMU_IFLA_LINK,
419     QEMU_IFLA_QDISC,
420     QEMU_IFLA_STATS,
421     QEMU_IFLA_COST,
422     QEMU_IFLA_PRIORITY,
423     QEMU_IFLA_MASTER,
424     QEMU_IFLA_WIRELESS,
425     QEMU_IFLA_PROTINFO,
426     QEMU_IFLA_TXQLEN,
427     QEMU_IFLA_MAP,
428     QEMU_IFLA_WEIGHT,
429     QEMU_IFLA_OPERSTATE,
430     QEMU_IFLA_LINKMODE,
431     QEMU_IFLA_LINKINFO,
432     QEMU_IFLA_NET_NS_PID,
433     QEMU_IFLA_IFALIAS,
434     QEMU_IFLA_NUM_VF,
435     QEMU_IFLA_VFINFO_LIST,
436     QEMU_IFLA_STATS64,
437     QEMU_IFLA_VF_PORTS,
438     QEMU_IFLA_PORT_SELF,
439     QEMU_IFLA_AF_SPEC,
440     QEMU_IFLA_GROUP,
441     QEMU_IFLA_NET_NS_FD,
442     QEMU_IFLA_EXT_MASK,
443     QEMU_IFLA_PROMISCUITY,
444     QEMU_IFLA_NUM_TX_QUEUES,
445     QEMU_IFLA_NUM_RX_QUEUES,
446     QEMU_IFLA_CARRIER,
447     QEMU_IFLA_PHYS_PORT_ID,
448     QEMU_IFLA_CARRIER_CHANGES,
449     QEMU_IFLA_PHYS_SWITCH_ID,
450     QEMU_IFLA_LINK_NETNSID,
451     QEMU_IFLA_PHYS_PORT_NAME,
452     QEMU_IFLA_PROTO_DOWN,
453     QEMU_IFLA_GSO_MAX_SEGS,
454     QEMU_IFLA_GSO_MAX_SIZE,
455     QEMU_IFLA_PAD,
456     QEMU_IFLA_XDP,
457     QEMU_IFLA_EVENT,
458     QEMU_IFLA_NEW_NETNSID,
459     QEMU_IFLA_IF_NETNSID,
460     QEMU_IFLA_CARRIER_UP_COUNT,
461     QEMU_IFLA_CARRIER_DOWN_COUNT,
462     QEMU_IFLA_NEW_IFINDEX,
463     QEMU___IFLA_MAX
464 };
465 
466 enum {
467     QEMU_IFLA_BRPORT_UNSPEC,
468     QEMU_IFLA_BRPORT_STATE,
469     QEMU_IFLA_BRPORT_PRIORITY,
470     QEMU_IFLA_BRPORT_COST,
471     QEMU_IFLA_BRPORT_MODE,
472     QEMU_IFLA_BRPORT_GUARD,
473     QEMU_IFLA_BRPORT_PROTECT,
474     QEMU_IFLA_BRPORT_FAST_LEAVE,
475     QEMU_IFLA_BRPORT_LEARNING,
476     QEMU_IFLA_BRPORT_UNICAST_FLOOD,
477     QEMU_IFLA_BRPORT_PROXYARP,
478     QEMU_IFLA_BRPORT_LEARNING_SYNC,
479     QEMU_IFLA_BRPORT_PROXYARP_WIFI,
480     QEMU_IFLA_BRPORT_ROOT_ID,
481     QEMU_IFLA_BRPORT_BRIDGE_ID,
482     QEMU_IFLA_BRPORT_DESIGNATED_PORT,
483     QEMU_IFLA_BRPORT_DESIGNATED_COST,
484     QEMU_IFLA_BRPORT_ID,
485     QEMU_IFLA_BRPORT_NO,
486     QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
487     QEMU_IFLA_BRPORT_CONFIG_PENDING,
488     QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
489     QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
490     QEMU_IFLA_BRPORT_HOLD_TIMER,
491     QEMU_IFLA_BRPORT_FLUSH,
492     QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
493     QEMU_IFLA_BRPORT_PAD,
494     QEMU_IFLA_BRPORT_MCAST_FLOOD,
495     QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
496     QEMU_IFLA_BRPORT_VLAN_TUNNEL,
497     QEMU_IFLA_BRPORT_BCAST_FLOOD,
498     QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
499     QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
500     QEMU___IFLA_BRPORT_MAX
501 };
502 
503 enum {
504     QEMU_IFLA_TUN_UNSPEC,
505     QEMU_IFLA_TUN_OWNER,
506     QEMU_IFLA_TUN_GROUP,
507     QEMU_IFLA_TUN_TYPE,
508     QEMU_IFLA_TUN_PI,
509     QEMU_IFLA_TUN_VNET_HDR,
510     QEMU_IFLA_TUN_PERSIST,
511     QEMU_IFLA_TUN_MULTI_QUEUE,
512     QEMU_IFLA_TUN_NUM_QUEUES,
513     QEMU_IFLA_TUN_NUM_DISABLED_QUEUES,
514     QEMU___IFLA_TUN_MAX,
515 };
516 
517 enum {
518     QEMU_IFLA_INFO_UNSPEC,
519     QEMU_IFLA_INFO_KIND,
520     QEMU_IFLA_INFO_DATA,
521     QEMU_IFLA_INFO_XSTATS,
522     QEMU_IFLA_INFO_SLAVE_KIND,
523     QEMU_IFLA_INFO_SLAVE_DATA,
524     QEMU___IFLA_INFO_MAX,
525 };
526 
527 enum {
528     QEMU_IFLA_INET_UNSPEC,
529     QEMU_IFLA_INET_CONF,
530     QEMU___IFLA_INET_MAX,
531 };
532 
533 enum {
534     QEMU_IFLA_INET6_UNSPEC,
535     QEMU_IFLA_INET6_FLAGS,
536     QEMU_IFLA_INET6_CONF,
537     QEMU_IFLA_INET6_STATS,
538     QEMU_IFLA_INET6_MCAST,
539     QEMU_IFLA_INET6_CACHEINFO,
540     QEMU_IFLA_INET6_ICMP6STATS,
541     QEMU_IFLA_INET6_TOKEN,
542     QEMU_IFLA_INET6_ADDR_GEN_MODE,
543     QEMU___IFLA_INET6_MAX
544 };
545 
546 enum {
547     QEMU_IFLA_XDP_UNSPEC,
548     QEMU_IFLA_XDP_FD,
549     QEMU_IFLA_XDP_ATTACHED,
550     QEMU_IFLA_XDP_FLAGS,
551     QEMU_IFLA_XDP_PROG_ID,
552     QEMU___IFLA_XDP_MAX,
553 };
554 
555 enum {
556     QEMU_RTA_UNSPEC,
557     QEMU_RTA_DST,
558     QEMU_RTA_SRC,
559     QEMU_RTA_IIF,
560     QEMU_RTA_OIF,
561     QEMU_RTA_GATEWAY,
562     QEMU_RTA_PRIORITY,
563     QEMU_RTA_PREFSRC,
564     QEMU_RTA_METRICS,
565     QEMU_RTA_MULTIPATH,
566     QEMU_RTA_PROTOINFO, /* no longer used */
567     QEMU_RTA_FLOW,
568     QEMU_RTA_CACHEINFO,
569     QEMU_RTA_SESSION, /* no longer used */
570     QEMU_RTA_MP_ALGO, /* no longer used */
571     QEMU_RTA_TABLE,
572     QEMU_RTA_MARK,
573     QEMU_RTA_MFC_STATS,
574     QEMU_RTA_VIA,
575     QEMU_RTA_NEWDST,
576     QEMU_RTA_PREF,
577     QEMU_RTA_ENCAP_TYPE,
578     QEMU_RTA_ENCAP,
579     QEMU_RTA_EXPIRES,
580     QEMU_RTA_PAD,
581     QEMU_RTA_UID,
582     QEMU_RTA_TTL_PROPAGATE,
583     QEMU_RTA_IP_PROTO,
584     QEMU_RTA_SPORT,
585     QEMU_RTA_DPORT,
586     QEMU___RTA_MAX
587 };
588 
589 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
590 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
591 typedef struct TargetFdTrans {
592     TargetFdDataFunc host_to_target_data;
593     TargetFdDataFunc target_to_host_data;
594     TargetFdAddrFunc target_to_host_addr;
595 } TargetFdTrans;
596 
597 static TargetFdTrans **target_fd_trans;
598 
599 static unsigned int target_fd_max;
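/* The table above maps a guest fd to optional data/address translation
 * hooks. Code that creates fds carrying structured payloads (e.g. the
 * signalfd and netlink socket handling) registers a TargetFdTrans so that
 * read()/write()/sendmsg() data is converted between host and target
 * layouts; plain fds keep a NULL entry and are passed through untouched.
 */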
600 
601 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
602 {
603     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
604         return target_fd_trans[fd]->target_to_host_data;
605     }
606     return NULL;
607 }
608 
609 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
610 {
611     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
612         return target_fd_trans[fd]->host_to_target_data;
613     }
614     return NULL;
615 }
616 
617 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
618 {
619     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
620         return target_fd_trans[fd]->target_to_host_addr;
621     }
622     return NULL;
623 }
624 
625 static void fd_trans_register(int fd, TargetFdTrans *trans)
626 {
627     unsigned int oldmax;
628 
629     if (fd >= target_fd_max) {
630         oldmax = target_fd_max;
631         target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
632         target_fd_trans = g_renew(TargetFdTrans *,
633                                   target_fd_trans, target_fd_max);
634         memset((void *)(target_fd_trans + oldmax), 0,
635                (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
636     }
637     target_fd_trans[fd] = trans;
638 }
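/* E.g. registering fd 70 grows the table to ((70 >> 6) + 1) << 6 == 128
 * entries, with the newly added slots zero-initialised by the memset above.
 */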
639 
640 static void fd_trans_unregister(int fd)
641 {
642     if (fd >= 0 && fd < target_fd_max) {
643         target_fd_trans[fd] = NULL;
644     }
645 }
646 
647 static void fd_trans_dup(int oldfd, int newfd)
648 {
649     fd_trans_unregister(newfd);
650     if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
651         fd_trans_register(newfd, target_fd_trans[oldfd]);
652     }
653 }
654 
655 static int sys_getcwd1(char *buf, size_t size)
656 {
657   if (getcwd(buf, size) == NULL) {
658       /* getcwd() sets errno */
659       return (-1);
660   }
661   return strlen(buf)+1;
662 }
663 
664 #ifdef TARGET_NR_utimensat
665 #if defined(__NR_utimensat)
666 #define __NR_sys_utimensat __NR_utimensat
667 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
668           const struct timespec *,tsp,int,flags)
669 #else
670 static int sys_utimensat(int dirfd, const char *pathname,
671                          const struct timespec times[2], int flags)
672 {
673     errno = ENOSYS;
674     return -1;
675 }
676 #endif
677 #endif /* TARGET_NR_utimensat */
678 
679 #ifdef TARGET_NR_renameat2
680 #if defined(__NR_renameat2)
681 #define __NR_sys_renameat2 __NR_renameat2
682 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
683           const char *, new, unsigned int, flags)
684 #else
685 static int sys_renameat2(int oldfd, const char *old,
686                          int newfd, const char *new, int flags)
687 {
688     if (flags == 0) {
689         return renameat(oldfd, old, newfd, new);
690     }
691     errno = ENOSYS;
692     return -1;
693 }
694 #endif
695 #endif /* TARGET_NR_renameat2 */
696 
697 #ifdef CONFIG_INOTIFY
698 #include <sys/inotify.h>
699 
700 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
701 static int sys_inotify_init(void)
702 {
703   return (inotify_init());
704 }
705 #endif
706 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
707 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
708 {
709   return (inotify_add_watch(fd, pathname, mask));
710 }
711 #endif
712 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
713 static int sys_inotify_rm_watch(int fd, int32_t wd)
714 {
715   return (inotify_rm_watch(fd, wd));
716 }
717 #endif
718 #ifdef CONFIG_INOTIFY1
719 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
720 static int sys_inotify_init1(int flags)
721 {
722   return (inotify_init1(flags));
723 }
724 #endif
725 #endif
726 #else
727 /* Userspace can usually survive runtime without inotify */
728 #undef TARGET_NR_inotify_init
729 #undef TARGET_NR_inotify_init1
730 #undef TARGET_NR_inotify_add_watch
731 #undef TARGET_NR_inotify_rm_watch
732 #endif /* CONFIG_INOTIFY  */
733 
734 #if defined(TARGET_NR_prlimit64)
735 #ifndef __NR_prlimit64
736 # define __NR_prlimit64 -1
737 #endif
738 #define __NR_sys_prlimit64 __NR_prlimit64
739 /* The glibc rlimit structure may not be the one used by the underlying syscall */
740 struct host_rlimit64 {
741     uint64_t rlim_cur;
742     uint64_t rlim_max;
743 };
744 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
745           const struct host_rlimit64 *, new_limit,
746           struct host_rlimit64 *, old_limit)
747 #endif
748 
749 
750 #if defined(TARGET_NR_timer_create)
751 /* Maximum of 32 active POSIX timers allowed at any one time. */
752 static timer_t g_posix_timers[32] = { 0, } ;
753 
754 static inline int next_free_host_timer(void)
755 {
756     int k ;
757     /* FIXME: Does finding the next free slot require a lock? */
758     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
759         if (g_posix_timers[k] == 0) {
760             g_posix_timers[k] = (timer_t) 1;
761             return k;
762         }
763     }
764     return -1;
765 }
766 #endif
767 
768 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
769 #ifdef TARGET_ARM
770 static inline int regpairs_aligned(void *cpu_env, int num)
771 {
772     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
773 }
774 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
775 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
776 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
777 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
778  * of registers which translates to the same as ARM/MIPS, because we start with
779  * r3 as arg1 */
780 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
781 #elif defined(TARGET_SH4)
782 /* SH4 doesn't align register pairs, except for p{read,write}64 */
783 static inline int regpairs_aligned(void *cpu_env, int num)
784 {
785     switch (num) {
786     case TARGET_NR_pread64:
787     case TARGET_NR_pwrite64:
788         return 1;
789 
790     default:
791         return 0;
792     }
793 }
794 #elif defined(TARGET_XTENSA)
795 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
796 #else
797 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
798 #endif
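/* When regpairs_aligned() returns 1, the syscall handlers that take 64-bit
 * values split across two registers (pread64/pwrite64, truncate64, ...)
 * shift their arguments by one register so that the low/high halves are
 * taken from an even/odd pair, matching the guest ABI's argument layout.
 */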
799 
800 #define ERRNO_TABLE_SIZE 1200
801 
802 /* target_to_host_errno_table[] is initialized from
803  * host_to_target_errno_table[] in syscall_init(). */
804 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
805 };
806 
807 /*
808  * This list is the union of errno values overridden in asm-<arch>/errno.h
809  * minus the errnos that are not actually generic to all archs.
810  */
811 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
812     [EAGAIN]		= TARGET_EAGAIN,
813     [EIDRM]		= TARGET_EIDRM,
814     [ECHRNG]		= TARGET_ECHRNG,
815     [EL2NSYNC]		= TARGET_EL2NSYNC,
816     [EL3HLT]		= TARGET_EL3HLT,
817     [EL3RST]		= TARGET_EL3RST,
818     [ELNRNG]		= TARGET_ELNRNG,
819     [EUNATCH]		= TARGET_EUNATCH,
820     [ENOCSI]		= TARGET_ENOCSI,
821     [EL2HLT]		= TARGET_EL2HLT,
822     [EDEADLK]		= TARGET_EDEADLK,
823     [ENOLCK]		= TARGET_ENOLCK,
824     [EBADE]		= TARGET_EBADE,
825     [EBADR]		= TARGET_EBADR,
826     [EXFULL]		= TARGET_EXFULL,
827     [ENOANO]		= TARGET_ENOANO,
828     [EBADRQC]		= TARGET_EBADRQC,
829     [EBADSLT]		= TARGET_EBADSLT,
830     [EBFONT]		= TARGET_EBFONT,
831     [ENOSTR]		= TARGET_ENOSTR,
832     [ENODATA]		= TARGET_ENODATA,
833     [ETIME]		= TARGET_ETIME,
834     [ENOSR]		= TARGET_ENOSR,
835     [ENONET]		= TARGET_ENONET,
836     [ENOPKG]		= TARGET_ENOPKG,
837     [EREMOTE]		= TARGET_EREMOTE,
838     [ENOLINK]		= TARGET_ENOLINK,
839     [EADV]		= TARGET_EADV,
840     [ESRMNT]		= TARGET_ESRMNT,
841     [ECOMM]		= TARGET_ECOMM,
842     [EPROTO]		= TARGET_EPROTO,
843     [EDOTDOT]		= TARGET_EDOTDOT,
844     [EMULTIHOP]		= TARGET_EMULTIHOP,
845     [EBADMSG]		= TARGET_EBADMSG,
846     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
847     [EOVERFLOW]		= TARGET_EOVERFLOW,
848     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
849     [EBADFD]		= TARGET_EBADFD,
850     [EREMCHG]		= TARGET_EREMCHG,
851     [ELIBACC]		= TARGET_ELIBACC,
852     [ELIBBAD]		= TARGET_ELIBBAD,
853     [ELIBSCN]		= TARGET_ELIBSCN,
854     [ELIBMAX]		= TARGET_ELIBMAX,
855     [ELIBEXEC]		= TARGET_ELIBEXEC,
856     [EILSEQ]		= TARGET_EILSEQ,
857     [ENOSYS]		= TARGET_ENOSYS,
858     [ELOOP]		= TARGET_ELOOP,
859     [ERESTART]		= TARGET_ERESTART,
860     [ESTRPIPE]		= TARGET_ESTRPIPE,
861     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
862     [EUSERS]		= TARGET_EUSERS,
863     [ENOTSOCK]		= TARGET_ENOTSOCK,
864     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
865     [EMSGSIZE]		= TARGET_EMSGSIZE,
866     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
867     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
868     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
869     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
870     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
871     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
872     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
873     [EADDRINUSE]	= TARGET_EADDRINUSE,
874     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
875     [ENETDOWN]		= TARGET_ENETDOWN,
876     [ENETUNREACH]	= TARGET_ENETUNREACH,
877     [ENETRESET]		= TARGET_ENETRESET,
878     [ECONNABORTED]	= TARGET_ECONNABORTED,
879     [ECONNRESET]	= TARGET_ECONNRESET,
880     [ENOBUFS]		= TARGET_ENOBUFS,
881     [EISCONN]		= TARGET_EISCONN,
882     [ENOTCONN]		= TARGET_ENOTCONN,
883     [EUCLEAN]		= TARGET_EUCLEAN,
884     [ENOTNAM]		= TARGET_ENOTNAM,
885     [ENAVAIL]		= TARGET_ENAVAIL,
886     [EISNAM]		= TARGET_EISNAM,
887     [EREMOTEIO]		= TARGET_EREMOTEIO,
888     [EDQUOT]            = TARGET_EDQUOT,
889     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
890     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
891     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
892     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
893     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
894     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
895     [EALREADY]		= TARGET_EALREADY,
896     [EINPROGRESS]	= TARGET_EINPROGRESS,
897     [ESTALE]		= TARGET_ESTALE,
898     [ECANCELED]		= TARGET_ECANCELED,
899     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
900     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
901 #ifdef ENOKEY
902     [ENOKEY]		= TARGET_ENOKEY,
903 #endif
904 #ifdef EKEYEXPIRED
905     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
906 #endif
907 #ifdef EKEYREVOKED
908     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
909 #endif
910 #ifdef EKEYREJECTED
911     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
912 #endif
913 #ifdef EOWNERDEAD
914     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
915 #endif
916 #ifdef ENOTRECOVERABLE
917     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
918 #endif
919 #ifdef ENOMSG
920     [ENOMSG]            = TARGET_ENOMSG,
921 #endif
922 #ifdef ERFKILL
923     [ERFKILL]           = TARGET_ERFKILL,
924 #endif
925 #ifdef EHWPOISON
926     [EHWPOISON]         = TARGET_EHWPOISON,
927 #endif
928 };
929 
930 static inline int host_to_target_errno(int err)
931 {
932     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
933         host_to_target_errno_table[err]) {
934         return host_to_target_errno_table[err];
935     }
936     return err;
937 }
938 
939 static inline int target_to_host_errno(int err)
940 {
941     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
942         target_to_host_errno_table[err]) {
943         return target_to_host_errno_table[err];
944     }
945     return err;
946 }
947 
948 static inline abi_long get_errno(abi_long ret)
949 {
950     if (ret == -1)
951         return -host_to_target_errno(errno);
952     else
953         return ret;
954 }
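/* Example: if a host syscall fails with ENOSYS (38 on x86 hosts),
 * get_errno() returns -TARGET_ENOSYS, which for a MIPS guest is -89.
 * Errno values not listed in the table above are assumed to be identical
 * on host and target and are passed through unchanged.
 */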
955 
956 const char *target_strerror(int err)
957 {
958     if (err == TARGET_ERESTARTSYS) {
959         return "To be restarted";
960     }
961     if (err == TARGET_QEMU_ESIGRETURN) {
962         return "Successful exit from sigreturn";
963     }
964 
965     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
966         return NULL;
967     }
968     return strerror(target_to_host_errno(err));
969 }
970 
971 #define safe_syscall0(type, name) \
972 static type safe_##name(void) \
973 { \
974     return safe_syscall(__NR_##name); \
975 }
976 
977 #define safe_syscall1(type, name, type1, arg1) \
978 static type safe_##name(type1 arg1) \
979 { \
980     return safe_syscall(__NR_##name, arg1); \
981 }
982 
983 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
984 static type safe_##name(type1 arg1, type2 arg2) \
985 { \
986     return safe_syscall(__NR_##name, arg1, arg2); \
987 }
988 
989 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
990 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
991 { \
992     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
993 }
994 
995 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
996     type4, arg4) \
997 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
998 { \
999     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
1000 }
1001 
1002 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
1003     type4, arg4, type5, arg5) \
1004 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1005     type5 arg5) \
1006 { \
1007     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
1008 }
1009 
1010 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
1011     type4, arg4, type5, arg5, type6, arg6) \
1012 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1013     type5 arg5, type6 arg6) \
1014 { \
1015     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
1016 }
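/* The safe_* wrappers generated above differ from plain syscall() in that
 * safe_syscall() (implemented in the per-host safe-syscall assembly) makes
 * the check for pending guest signals and the host syscall effectively
 * atomic: if a guest signal is already pending, the call fails with
 * TARGET_ERESTARTSYS instead of blocking, so the syscall can be restarted
 * after the signal has been delivered to the guest.
 */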
1017 
1018 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
1019 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
1020 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
1021               int, flags, mode_t, mode)
1022 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
1023               struct rusage *, rusage)
1024 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
1025               int, options, struct rusage *, rusage)
1026 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
1027 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
1028               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
1029 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
1030               struct timespec *, tsp, const sigset_t *, sigmask,
1031               size_t, sigsetsize)
1032 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
1033               int, maxevents, int, timeout, const sigset_t *, sigmask,
1034               size_t, sigsetsize)
1035 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
1036               const struct timespec *,timeout,int *,uaddr2,int,val3)
1037 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
1038 safe_syscall2(int, kill, pid_t, pid, int, sig)
1039 safe_syscall2(int, tkill, int, tid, int, sig)
1040 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
1041 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
1042 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
1043 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
1044               unsigned long, pos_l, unsigned long, pos_h)
1045 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
1046               unsigned long, pos_l, unsigned long, pos_h)
1047 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
1048               socklen_t, addrlen)
1049 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
1050               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
1051 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
1052               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
1053 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
1054 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
1055 safe_syscall2(int, flock, int, fd, int, operation)
1056 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
1057               const struct timespec *, uts, size_t, sigsetsize)
1058 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
1059               int, flags)
1060 safe_syscall2(int, nanosleep, const struct timespec *, req,
1061               struct timespec *, rem)
1062 #ifdef TARGET_NR_clock_nanosleep
1063 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
1064               const struct timespec *, req, struct timespec *, rem)
1065 #endif
1066 #ifdef __NR_msgsnd
1067 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
1068               int, flags)
1069 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
1070               long, msgtype, int, flags)
1071 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
1072               unsigned, nsops, const struct timespec *, timeout)
1073 #else
1074 /* This host kernel architecture uses a single ipc syscall; fake up
1075  * wrappers for the sub-operations to hide this implementation detail.
1076  * Annoyingly we can't include linux/ipc.h to get the constant definitions
1077  * for the call parameter because some structs in there conflict with the
1078  * sys/ipc.h ones. So we just define them here, and rely on them being
1079  * the same for all host architectures.
1080  */
1081 #define Q_SEMTIMEDOP 4
1082 #define Q_MSGSND 11
1083 #define Q_MSGRCV 12
1084 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1085 
1086 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1087               void *, ptr, long, fifth)
1088 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1089 {
1090     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1091 }
1092 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1093 {
1094     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1095 }
1096 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1097                            const struct timespec *timeout)
1098 {
1099     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1100                     (long)timeout);
1101 }
1102 #endif
1103 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1104 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1105               size_t, len, unsigned, prio, const struct timespec *, timeout)
1106 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1107               size_t, len, unsigned *, prio, const struct timespec *, timeout)
1108 #endif
1109 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1110  * "third argument might be integer or pointer or not present" behaviour of
1111  * the libc function.
1112  */
1113 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1114 /* Similarly for fcntl. Note that callers must always:
1115  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1116  *  use the flock64 struct rather than unsuffixed flock
1117  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1118  */
1119 #ifdef __NR_fcntl64
1120 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1121 #else
1122 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1123 #endif
1124 
1125 static inline int host_to_target_sock_type(int host_type)
1126 {
1127     int target_type;
1128 
1129     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1130     case SOCK_DGRAM:
1131         target_type = TARGET_SOCK_DGRAM;
1132         break;
1133     case SOCK_STREAM:
1134         target_type = TARGET_SOCK_STREAM;
1135         break;
1136     default:
1137         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1138         break;
1139     }
1140 
1141 #if defined(SOCK_CLOEXEC)
1142     if (host_type & SOCK_CLOEXEC) {
1143         target_type |= TARGET_SOCK_CLOEXEC;
1144     }
1145 #endif
1146 
1147 #if defined(SOCK_NONBLOCK)
1148     if (host_type & SOCK_NONBLOCK) {
1149         target_type |= TARGET_SOCK_NONBLOCK;
1150     }
1151 #endif
1152 
1153     return target_type;
1154 }
1155 
1156 static abi_ulong target_brk;
1157 static abi_ulong target_original_brk;
1158 static abi_ulong brk_page;
1159 
1160 void target_set_brk(abi_ulong new_brk)
1161 {
1162     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1163     brk_page = HOST_PAGE_ALIGN(target_brk);
1164 }
1165 
1166 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1167 #define DEBUGF_BRK(message, args...)
1168 
1169 /* do_brk() must return target values and target errnos. */
1170 abi_long do_brk(abi_ulong new_brk)
1171 {
1172     abi_long mapped_addr;
1173     abi_ulong new_alloc_size;
1174 
1175     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1176 
1177     if (!new_brk) {
1178         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1179         return target_brk;
1180     }
1181     if (new_brk < target_original_brk) {
1182         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1183                    target_brk);
1184         return target_brk;
1185     }
1186 
1187     /* If the new brk is less than the highest page reserved to the
1188      * target heap allocation, set it and we're almost done...  */
1189     if (new_brk <= brk_page) {
1190         /* Heap contents are initialized to zero, as for anonymous
1191          * mapped pages.  */
1192         if (new_brk > target_brk) {
1193             memset(g2h(target_brk), 0, new_brk - target_brk);
1194         }
1195         target_brk = new_brk;
1196         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1197         return target_brk;
1198     }
1199 
1200     /* We need to allocate more memory after the brk... Note that
1201      * we don't use MAP_FIXED because that will map over the top of
1202      * any existing mapping (like the one with the host libc or qemu
1203      * itself); instead we treat "mapped but at wrong address" as
1204      * a failure and unmap again.
1205      */
1206     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1207     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1208                                         PROT_READ|PROT_WRITE,
1209                                         MAP_ANON|MAP_PRIVATE, 0, 0));
1210 
1211     if (mapped_addr == brk_page) {
1212         /* Heap contents are initialized to zero, as for anonymous
1213          * mapped pages.  Technically the new pages are already
1214          * initialized to zero since they *are* anonymous mapped
1215          * pages, however we have to take care with the contents that
1216          * come from the remaining part of the previous page: it may
1217          * contain garbage data due to a previous heap usage (grown
1218          * then shrunken).  */
1219         memset(g2h(target_brk), 0, brk_page - target_brk);
1220 
1221         target_brk = new_brk;
1222         brk_page = HOST_PAGE_ALIGN(target_brk);
1223         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1224             target_brk);
1225         return target_brk;
1226     } else if (mapped_addr != -1) {
1227         /* Mapped but at wrong address, meaning there wasn't actually
1228          * enough space for this brk.
1229          */
1230         target_munmap(mapped_addr, new_alloc_size);
1231         mapped_addr = -1;
1232         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1233     }
1234     else {
1235         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1236     }
1237 
1238 #if defined(TARGET_ALPHA)
1239     /* We (partially) emulate OSF/1 on Alpha, which requires we
1240        return a proper errno, not an unchanged brk value.  */
1241     return -TARGET_ENOMEM;
1242 #endif
1243     /* For everything else, return the previous break. */
1244     return target_brk;
1245 }
1246 
1247 static inline abi_long copy_from_user_fdset(fd_set *fds,
1248                                             abi_ulong target_fds_addr,
1249                                             int n)
1250 {
1251     int i, nw, j, k;
1252     abi_ulong b, *target_fds;
1253 
1254     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1255     if (!(target_fds = lock_user(VERIFY_READ,
1256                                  target_fds_addr,
1257                                  sizeof(abi_ulong) * nw,
1258                                  1)))
1259         return -TARGET_EFAULT;
1260 
1261     FD_ZERO(fds);
1262     k = 0;
1263     for (i = 0; i < nw; i++) {
1264         /* grab the abi_ulong */
1265         __get_user(b, &target_fds[i]);
1266         for (j = 0; j < TARGET_ABI_BITS; j++) {
1267             /* check the bit inside the abi_ulong */
1268             if ((b >> j) & 1)
1269                 FD_SET(k, fds);
1270             k++;
1271         }
1272     }
1273 
1274     unlock_user(target_fds, target_fds_addr, 0);
1275 
1276     return 0;
1277 }
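/* E.g. with TARGET_ABI_BITS == 32 and n == 35, two abi_ulong words are
 * locked and bit j of word i corresponds to guest descriptor i * 32 + j,
 * so descriptors 0..34 are covered; copy_to_user_fdset() below performs
 * the inverse packing.
 */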
1278 
1279 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1280                                                  abi_ulong target_fds_addr,
1281                                                  int n)
1282 {
1283     if (target_fds_addr) {
1284         if (copy_from_user_fdset(fds, target_fds_addr, n))
1285             return -TARGET_EFAULT;
1286         *fds_ptr = fds;
1287     } else {
1288         *fds_ptr = NULL;
1289     }
1290     return 0;
1291 }
1292 
1293 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1294                                           const fd_set *fds,
1295                                           int n)
1296 {
1297     int i, nw, j, k;
1298     abi_long v;
1299     abi_ulong *target_fds;
1300 
1301     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1302     if (!(target_fds = lock_user(VERIFY_WRITE,
1303                                  target_fds_addr,
1304                                  sizeof(abi_ulong) * nw,
1305                                  0)))
1306         return -TARGET_EFAULT;
1307 
1308     k = 0;
1309     for (i = 0; i < nw; i++) {
1310         v = 0;
1311         for (j = 0; j < TARGET_ABI_BITS; j++) {
1312             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1313             k++;
1314         }
1315         __put_user(v, &target_fds[i]);
1316     }
1317 
1318     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1319 
1320     return 0;
1321 }
1322 
1323 #if defined(__alpha__)
1324 #define HOST_HZ 1024
1325 #else
1326 #define HOST_HZ 100
1327 #endif
1328 
1329 static inline abi_long host_to_target_clock_t(long ticks)
1330 {
1331 #if HOST_HZ == TARGET_HZ
1332     return ticks;
1333 #else
1334     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1335 #endif
1336 }
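/* E.g. on an Alpha host (HOST_HZ == 1024) reporting 2048 ticks to a guest
 * with TARGET_HZ == 100, the result is 2048 * 100 / 1024 == 200 target
 * clock ticks.
 */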
1337 
1338 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1339                                              const struct rusage *rusage)
1340 {
1341     struct target_rusage *target_rusage;
1342 
1343     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1344         return -TARGET_EFAULT;
1345     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1346     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1347     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1348     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1349     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1350     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1351     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1352     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1353     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1354     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1355     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1356     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1357     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1358     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1359     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1360     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1361     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1362     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1363     unlock_user_struct(target_rusage, target_addr, 1);
1364 
1365     return 0;
1366 }
1367 
1368 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1369 {
1370     abi_ulong target_rlim_swap;
1371     rlim_t result;
1372 
1373     target_rlim_swap = tswapal(target_rlim);
1374     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1375         return RLIM_INFINITY;
1376 
1377     result = target_rlim_swap;
1378     if (target_rlim_swap != (rlim_t)result)
1379         return RLIM_INFINITY;
1380 
1381     return result;
1382 }
1383 
1384 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1385 {
1386     abi_ulong target_rlim_swap;
1387     abi_ulong result;
1388 
1389     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1390         target_rlim_swap = TARGET_RLIM_INFINITY;
1391     else
1392         target_rlim_swap = rlim;
1393     result = tswapal(target_rlim_swap);
1394 
1395     return result;
1396 }
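/* Note that a host limit too large to fit the target's signed abi_long
 * (e.g. a value of 2^31 or more for a 32-bit target) fails the
 * (abi_long)rlim round-trip check above and is reported to the guest as
 * TARGET_RLIM_INFINITY rather than being silently truncated.
 */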
1397 
1398 static inline int target_to_host_resource(int code)
1399 {
1400     switch (code) {
1401     case TARGET_RLIMIT_AS:
1402         return RLIMIT_AS;
1403     case TARGET_RLIMIT_CORE:
1404         return RLIMIT_CORE;
1405     case TARGET_RLIMIT_CPU:
1406         return RLIMIT_CPU;
1407     case TARGET_RLIMIT_DATA:
1408         return RLIMIT_DATA;
1409     case TARGET_RLIMIT_FSIZE:
1410         return RLIMIT_FSIZE;
1411     case TARGET_RLIMIT_LOCKS:
1412         return RLIMIT_LOCKS;
1413     case TARGET_RLIMIT_MEMLOCK:
1414         return RLIMIT_MEMLOCK;
1415     case TARGET_RLIMIT_MSGQUEUE:
1416         return RLIMIT_MSGQUEUE;
1417     case TARGET_RLIMIT_NICE:
1418         return RLIMIT_NICE;
1419     case TARGET_RLIMIT_NOFILE:
1420         return RLIMIT_NOFILE;
1421     case TARGET_RLIMIT_NPROC:
1422         return RLIMIT_NPROC;
1423     case TARGET_RLIMIT_RSS:
1424         return RLIMIT_RSS;
1425     case TARGET_RLIMIT_RTPRIO:
1426         return RLIMIT_RTPRIO;
1427     case TARGET_RLIMIT_SIGPENDING:
1428         return RLIMIT_SIGPENDING;
1429     case TARGET_RLIMIT_STACK:
1430         return RLIMIT_STACK;
1431     default:
1432         return code;
1433     }
1434 }
1435 
1436 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1437                                               abi_ulong target_tv_addr)
1438 {
1439     struct target_timeval *target_tv;
1440 
1441     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1442         return -TARGET_EFAULT;
1443 
1444     __get_user(tv->tv_sec, &target_tv->tv_sec);
1445     __get_user(tv->tv_usec, &target_tv->tv_usec);
1446 
1447     unlock_user_struct(target_tv, target_tv_addr, 0);
1448 
1449     return 0;
1450 }
1451 
1452 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1453                                             const struct timeval *tv)
1454 {
1455     struct target_timeval *target_tv;
1456 
1457     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1458         return -TARGET_EFAULT;
1459 
1460     __put_user(tv->tv_sec, &target_tv->tv_sec);
1461     __put_user(tv->tv_usec, &target_tv->tv_usec);
1462 
1463     unlock_user_struct(target_tv, target_tv_addr, 1);
1464 
1465     return 0;
1466 }
1467 
1468 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1469                                                abi_ulong target_tz_addr)
1470 {
1471     struct target_timezone *target_tz;
1472 
1473     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1474         return -TARGET_EFAULT;
1475     }
1476 
1477     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1478     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1479 
1480     unlock_user_struct(target_tz, target_tz_addr, 0);
1481 
1482     return 0;
1483 }
1484 
1485 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1486 #include <mqueue.h>
1487 
1488 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1489                                               abi_ulong target_mq_attr_addr)
1490 {
1491     struct target_mq_attr *target_mq_attr;
1492 
1493     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1494                           target_mq_attr_addr, 1))
1495         return -TARGET_EFAULT;
1496 
1497     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1498     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1499     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1500     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1501 
1502     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1503 
1504     return 0;
1505 }
1506 
1507 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1508                                             const struct mq_attr *attr)
1509 {
1510     struct target_mq_attr *target_mq_attr;
1511 
1512     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1513                           target_mq_attr_addr, 0))
1514         return -TARGET_EFAULT;
1515 
1516     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1517     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1518     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1519     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1520 
1521     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1522 
1523     return 0;
1524 }
1525 #endif
1526 
1527 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1528 /* do_select() must return target values and target errnos. */
1529 static abi_long do_select(int n,
1530                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1531                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1532 {
1533     fd_set rfds, wfds, efds;
1534     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1535     struct timeval tv;
1536     struct timespec ts, *ts_ptr;
1537     abi_long ret;
1538 
1539     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1540     if (ret) {
1541         return ret;
1542     }
1543     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1544     if (ret) {
1545         return ret;
1546     }
1547     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1548     if (ret) {
1549         return ret;
1550     }
1551 
1552     if (target_tv_addr) {
1553         if (copy_from_user_timeval(&tv, target_tv_addr))
1554             return -TARGET_EFAULT;
1555         ts.tv_sec = tv.tv_sec;
1556         ts.tv_nsec = tv.tv_usec * 1000;
1557         ts_ptr = &ts;
1558     } else {
1559         ts_ptr = NULL;
1560     }
1561 
1562     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1563                                   ts_ptr, NULL));
1564 
1565     if (!is_error(ret)) {
1566         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1567             return -TARGET_EFAULT;
1568         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1569             return -TARGET_EFAULT;
1570         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1571             return -TARGET_EFAULT;
1572 
1573         if (target_tv_addr) {
1574             tv.tv_sec = ts.tv_sec;
1575             tv.tv_usec = ts.tv_nsec / 1000;
1576             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1577                 return -TARGET_EFAULT;
1578             }
1579         }
1580     }
1581 
1582     return ret;
1583 }
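
/*
 * Worked example of the timeout conversion above: if the guest passes
 * { tv_sec = 1, tv_usec = 500000 }, do_select() hands safe_pselect6() a
 * struct timespec of { tv_sec = 1, tv_nsec = 500000000 }; if, say,
 * { 0, 250000000 } ns remain on return, the guest gets back
 * { tv_sec = 0, tv_usec = 250000 }.
 */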
1584 
1585 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1586 static abi_long do_old_select(abi_ulong arg1)
1587 {
1588     struct target_sel_arg_struct *sel;
1589     abi_ulong inp, outp, exp, tvp;
1590     long nsel;
1591 
1592     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1593         return -TARGET_EFAULT;
1594     }
1595 
1596     nsel = tswapal(sel->n);
1597     inp = tswapal(sel->inp);
1598     outp = tswapal(sel->outp);
1599     exp = tswapal(sel->exp);
1600     tvp = tswapal(sel->tvp);
1601 
1602     unlock_user_struct(sel, arg1, 0);
1603 
1604     return do_select(nsel, inp, outp, exp, tvp);
1605 }
1606 #endif
1607 #endif
1608 
1609 static abi_long do_pipe2(int host_pipe[], int flags)
1610 {
1611 #ifdef CONFIG_PIPE2
1612     return pipe2(host_pipe, flags);
1613 #else
1614     return -ENOSYS;
1615 #endif
1616 }
1617 
1618 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1619                         int flags, int is_pipe2)
1620 {
1621     int host_pipe[2];
1622     abi_long ret;
1623     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1624 
1625     if (is_error(ret))
1626         return get_errno(ret);
1627 
1628     /* Several targets have special calling conventions for the original
1629        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1630     if (!is_pipe2) {
1631 #if defined(TARGET_ALPHA)
1632         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1633         return host_pipe[0];
1634 #elif defined(TARGET_MIPS)
1635         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1636         return host_pipe[0];
1637 #elif defined(TARGET_SH4)
1638         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1639         return host_pipe[0];
1640 #elif defined(TARGET_SPARC)
1641         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1642         return host_pipe[0];
1643 #endif
1644     }
1645 
1646     if (put_user_s32(host_pipe[0], pipedes)
1647         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1648         return -TARGET_EFAULT;
1649     return get_errno(ret);
1650 }
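
/*
 * Example of the two conventions handled above (illustrative values): a
 * plain pipe() on MIPS returns host_pipe[0] as the syscall result and
 * stashes host_pipe[1] in the second return register (active_tc.gpr[3]),
 * whereas pipe2() -- and pipe() on targets without a special convention --
 * stores both descriptors back-to-back into the guest array at 'pipedes'
 * via put_user_s32().
 */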
1651 
1652 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1653                                               abi_ulong target_addr,
1654                                               socklen_t len)
1655 {
1656     struct target_ip_mreqn *target_smreqn;
1657 
1658     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1659     if (!target_smreqn)
1660         return -TARGET_EFAULT;
1661     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1662     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1663     if (len == sizeof(struct target_ip_mreqn))
1664         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1665     unlock_user(target_smreqn, target_addr, 0);
1666 
1667     return 0;
1668 }
1669 
1670 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1671                                                abi_ulong target_addr,
1672                                                socklen_t len)
1673 {
1674     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1675     sa_family_t sa_family;
1676     struct target_sockaddr *target_saddr;
1677 
1678     if (fd_trans_target_to_host_addr(fd)) {
1679         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1680     }
1681 
1682     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1683     if (!target_saddr)
1684         return -TARGET_EFAULT;
1685 
1686     sa_family = tswap16(target_saddr->sa_family);
1687 
1688     /* Oops. The caller might send an incomplete sun_path; sun_path
1689      * must be terminated by \0 (see the manual page), but
1690      * unfortunately it is quite common to specify sockaddr_un
1691      * length as "strlen(x->sun_path)" while it should be
1692      * "strlen(...) + 1". We'll fix that here if needed.
1693      * The Linux kernel applies a similar fixup.
1694      */
1695 
1696     if (sa_family == AF_UNIX) {
1697         if (len < unix_maxlen && len > 0) {
1698             char *cp = (char*)target_saddr;
1699 
1700             if (cp[len - 1] && !cp[len])
1701                 len++;
1702         }
1703         if (len > unix_maxlen)
1704             len = unix_maxlen;
1705     }
1706 
1707     memcpy(addr, target_saddr, len);
1708     addr->sa_family = sa_family;
1709     if (sa_family == AF_NETLINK) {
1710         struct sockaddr_nl *nladdr;
1711 
1712         nladdr = (struct sockaddr_nl *)addr;
1713         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1714         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1715     } else if (sa_family == AF_PACKET) {
1716         struct target_sockaddr_ll *lladdr;
1717 
1718         lladdr = (struct target_sockaddr_ll *)addr;
1719         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1720         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1721     }
1722     unlock_user(target_saddr, target_addr, 0);
1723 
1724     return 0;
1725 }
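
/*
 * Example of the AF_UNIX length fixup above: a guest binding to "/tmp/sock"
 * that passes len = offsetof(struct sockaddr_un, sun_path) + 9 omits the
 * trailing NUL; when cp[len-1] is non-zero and the following byte cp[len]
 * happens to be 0, len is bumped by one so the host kernel sees a properly
 * terminated sun_path, capped at sizeof(struct sockaddr_un).
 */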
1726 
1727 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1728                                                struct sockaddr *addr,
1729                                                socklen_t len)
1730 {
1731     struct target_sockaddr *target_saddr;
1732 
1733     if (len == 0) {
1734         return 0;
1735     }
1736     assert(addr);
1737 
1738     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1739     if (!target_saddr)
1740         return -TARGET_EFAULT;
1741     memcpy(target_saddr, addr, len);
1742     if (len >= offsetof(struct target_sockaddr, sa_family) +
1743         sizeof(target_saddr->sa_family)) {
1744         target_saddr->sa_family = tswap16(addr->sa_family);
1745     }
1746     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1747         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1748         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1749         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1750     } else if (addr->sa_family == AF_PACKET) {
1751         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1752         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1753         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1754     } else if (addr->sa_family == AF_INET6 &&
1755                len >= sizeof(struct target_sockaddr_in6)) {
1756         struct target_sockaddr_in6 *target_in6 =
1757                (struct target_sockaddr_in6 *)target_saddr;
1758         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1759     }
1760     unlock_user(target_saddr, target_addr, len);
1761 
1762     return 0;
1763 }
1764 
1765 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1766                                            struct target_msghdr *target_msgh)
1767 {
1768     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1769     abi_long msg_controllen;
1770     abi_ulong target_cmsg_addr;
1771     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1772     socklen_t space = 0;
1773 
1774     msg_controllen = tswapal(target_msgh->msg_controllen);
1775     if (msg_controllen < sizeof (struct target_cmsghdr))
1776         goto the_end;
1777     target_cmsg_addr = tswapal(target_msgh->msg_control);
1778     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1779     target_cmsg_start = target_cmsg;
1780     if (!target_cmsg)
1781         return -TARGET_EFAULT;
1782 
1783     while (cmsg && target_cmsg) {
1784         void *data = CMSG_DATA(cmsg);
1785         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1786 
1787         int len = tswapal(target_cmsg->cmsg_len)
1788             - sizeof(struct target_cmsghdr);
1789 
1790         space += CMSG_SPACE(len);
1791         if (space > msgh->msg_controllen) {
1792             space -= CMSG_SPACE(len);
1793             /* This is a QEMU bug, since we allocated the payload
1794              * area ourselves (unlike overflow in host-to-target
1795              * conversion, which is just the guest giving us a buffer
1796              * that's too small). It can't happen for the payload types
1797              * we currently support; if it becomes an issue in future
1798              * we would need to improve our allocation strategy to
1799              * something more intelligent than "twice the size of the
1800              * target buffer we're reading from".
1801              */
1802             gemu_log("Host cmsg overflow\n");
1803             break;
1804         }
1805 
1806         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1807             cmsg->cmsg_level = SOL_SOCKET;
1808         } else {
1809             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1810         }
1811         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1812         cmsg->cmsg_len = CMSG_LEN(len);
1813 
1814         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1815             int *fd = (int *)data;
1816             int *target_fd = (int *)target_data;
1817             int i, numfds = len / sizeof(int);
1818 
1819             for (i = 0; i < numfds; i++) {
1820                 __get_user(fd[i], target_fd + i);
1821             }
1822         } else if (cmsg->cmsg_level == SOL_SOCKET
1823                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1824             struct ucred *cred = (struct ucred *)data;
1825             struct target_ucred *target_cred =
1826                 (struct target_ucred *)target_data;
1827 
1828             __get_user(cred->pid, &target_cred->pid);
1829             __get_user(cred->uid, &target_cred->uid);
1830             __get_user(cred->gid, &target_cred->gid);
1831         } else {
1832             gemu_log("Unsupported ancillary data: %d/%d\n",
1833                                         cmsg->cmsg_level, cmsg->cmsg_type);
1834             memcpy(data, target_data, len);
1835         }
1836 
1837         cmsg = CMSG_NXTHDR(msgh, cmsg);
1838         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1839                                          target_cmsg_start);
1840     }
1841     unlock_user(target_cmsg, target_cmsg_addr, 0);
1842  the_end:
1843     msgh->msg_controllen = space;
1844     return 0;
1845 }
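
/*
 * Example of the conversion above for SCM_RIGHTS: a guest sendmsg() passing
 * three file descriptors supplies a target_cmsghdr whose payload is three
 * ints; each is copied with __get_user() into the host control buffer and
 * cmsg_len is recomputed as the host CMSG_LEN(3 * sizeof(int)), so the host
 * kernel parses the control message with its own alignment rules.
 */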
1846 
1847 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1848                                            struct msghdr *msgh)
1849 {
1850     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1851     abi_long msg_controllen;
1852     abi_ulong target_cmsg_addr;
1853     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1854     socklen_t space = 0;
1855 
1856     msg_controllen = tswapal(target_msgh->msg_controllen);
1857     if (msg_controllen < sizeof (struct target_cmsghdr))
1858         goto the_end;
1859     target_cmsg_addr = tswapal(target_msgh->msg_control);
1860     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1861     target_cmsg_start = target_cmsg;
1862     if (!target_cmsg)
1863         return -TARGET_EFAULT;
1864 
1865     while (cmsg && target_cmsg) {
1866         void *data = CMSG_DATA(cmsg);
1867         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1868 
1869         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1870         int tgt_len, tgt_space;
1871 
1872         /* We never copy a half-header but may copy half-data;
1873          * this is Linux's behaviour in put_cmsg(). Note that
1874          * truncation here is a guest problem (which we report
1875          * to the guest via the CTRUNC bit), unlike truncation
1876          * in target_to_host_cmsg, which is a QEMU bug.
1877          */
1878         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1879             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1880             break;
1881         }
1882 
1883         if (cmsg->cmsg_level == SOL_SOCKET) {
1884             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1885         } else {
1886             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1887         }
1888         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1889 
1890         /* Payload types which need a different size of payload on
1891          * the target must adjust tgt_len here.
1892          */
1893         tgt_len = len;
1894         switch (cmsg->cmsg_level) {
1895         case SOL_SOCKET:
1896             switch (cmsg->cmsg_type) {
1897             case SO_TIMESTAMP:
1898                 tgt_len = sizeof(struct target_timeval);
1899                 break;
1900             default:
1901                 break;
1902             }
1903             break;
1904         default:
1905             break;
1906         }
1907 
1908         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1909             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1910             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1911         }
1912 
1913         /* We must now copy-and-convert len bytes of payload
1914          * into tgt_len bytes of destination space. Bear in mind
1915          * that in both source and destination we may be dealing
1916          * with a truncated value!
1917          */
1918         switch (cmsg->cmsg_level) {
1919         case SOL_SOCKET:
1920             switch (cmsg->cmsg_type) {
1921             case SCM_RIGHTS:
1922             {
1923                 int *fd = (int *)data;
1924                 int *target_fd = (int *)target_data;
1925                 int i, numfds = tgt_len / sizeof(int);
1926 
1927                 for (i = 0; i < numfds; i++) {
1928                     __put_user(fd[i], target_fd + i);
1929                 }
1930                 break;
1931             }
1932             case SO_TIMESTAMP:
1933             {
1934                 struct timeval *tv = (struct timeval *)data;
1935                 struct target_timeval *target_tv =
1936                     (struct target_timeval *)target_data;
1937 
1938                 if (len != sizeof(struct timeval) ||
1939                     tgt_len != sizeof(struct target_timeval)) {
1940                     goto unimplemented;
1941                 }
1942 
1943                 /* copy struct timeval to target */
1944                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1945                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1946                 break;
1947             }
1948             case SCM_CREDENTIALS:
1949             {
1950                 struct ucred *cred = (struct ucred *)data;
1951                 struct target_ucred *target_cred =
1952                     (struct target_ucred *)target_data;
1953 
1954                 __put_user(cred->pid, &target_cred->pid);
1955                 __put_user(cred->uid, &target_cred->uid);
1956                 __put_user(cred->gid, &target_cred->gid);
1957                 break;
1958             }
1959             default:
1960                 goto unimplemented;
1961             }
1962             break;
1963 
1964         case SOL_IP:
1965             switch (cmsg->cmsg_type) {
1966             case IP_TTL:
1967             {
1968                 uint32_t *v = (uint32_t *)data;
1969                 uint32_t *t_int = (uint32_t *)target_data;
1970 
1971                 if (len != sizeof(uint32_t) ||
1972                     tgt_len != sizeof(uint32_t)) {
1973                     goto unimplemented;
1974                 }
1975                 __put_user(*v, t_int);
1976                 break;
1977             }
1978             case IP_RECVERR:
1979             {
1980                 struct errhdr_t {
1981                    struct sock_extended_err ee;
1982                    struct sockaddr_in offender;
1983                 };
1984                 struct errhdr_t *errh = (struct errhdr_t *)data;
1985                 struct errhdr_t *target_errh =
1986                     (struct errhdr_t *)target_data;
1987 
1988                 if (len != sizeof(struct errhdr_t) ||
1989                     tgt_len != sizeof(struct errhdr_t)) {
1990                     goto unimplemented;
1991                 }
1992                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1993                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1994                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1995                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1996                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1997                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1998                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1999                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2000                     (void *) &errh->offender, sizeof(errh->offender));
2001                 break;
2002             }
2003             default:
2004                 goto unimplemented;
2005             }
2006             break;
2007 
2008         case SOL_IPV6:
2009             switch (cmsg->cmsg_type) {
2010             case IPV6_HOPLIMIT:
2011             {
2012                 uint32_t *v = (uint32_t *)data;
2013                 uint32_t *t_int = (uint32_t *)target_data;
2014 
2015                 if (len != sizeof(uint32_t) ||
2016                     tgt_len != sizeof(uint32_t)) {
2017                     goto unimplemented;
2018                 }
2019                 __put_user(*v, t_int);
2020                 break;
2021             }
2022             case IPV6_RECVERR:
2023             {
2024                 struct errhdr6_t {
2025                    struct sock_extended_err ee;
2026                    struct sockaddr_in6 offender;
2027                 };
2028                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2029                 struct errhdr6_t *target_errh =
2030                     (struct errhdr6_t *)target_data;
2031 
2032                 if (len != sizeof(struct errhdr6_t) ||
2033                     tgt_len != sizeof(struct errhdr6_t)) {
2034                     goto unimplemented;
2035                 }
2036                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2037                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2038                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2039                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2040                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2041                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2042                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2043                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2044                     (void *) &errh->offender, sizeof(errh->offender));
2045                 break;
2046             }
2047             default:
2048                 goto unimplemented;
2049             }
2050             break;
2051 
2052         default:
2053         unimplemented:
2054             gemu_log("Unsupported ancillary data: %d/%d\n",
2055                                         cmsg->cmsg_level, cmsg->cmsg_type);
2056             memcpy(target_data, data, MIN(len, tgt_len));
2057             if (tgt_len > len) {
2058                 memset(target_data + len, 0, tgt_len - len);
2059             }
2060         }
2061 
2062         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2063         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2064         if (msg_controllen < tgt_space) {
2065             tgt_space = msg_controllen;
2066         }
2067         msg_controllen -= tgt_space;
2068         space += tgt_space;
2069         cmsg = CMSG_NXTHDR(msgh, cmsg);
2070         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2071                                          target_cmsg_start);
2072     }
2073     unlock_user(target_cmsg, target_cmsg_addr, space);
2074  the_end:
2075     target_msgh->msg_controllen = tswapal(space);
2076     return 0;
2077 }
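
/*
 * Example of the tgt_len adjustment above: for SO_TIMESTAMP a 64-bit host
 * kernel delivers a 16-byte struct timeval, but a 32-bit guest expects an
 * 8-byte struct target_timeval, so tgt_len is forced to
 * sizeof(struct target_timeval) before the field-by-field __put_user() copy
 * and the TARGET_CMSG_SPACE() accounting.
 */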
2078 
2079 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2080 {
2081     nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2082     nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2083     nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2084     nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2085     nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
2086 }
2087 
2088 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2089                                               size_t len,
2090                                               abi_long (*host_to_target_nlmsg)
2091                                                        (struct nlmsghdr *))
2092 {
2093     uint32_t nlmsg_len;
2094     abi_long ret;
2095 
2096     while (len > sizeof(struct nlmsghdr)) {
2097 
2098         nlmsg_len = nlh->nlmsg_len;
2099         if (nlmsg_len < sizeof(struct nlmsghdr) ||
2100             nlmsg_len > len) {
2101             break;
2102         }
2103 
2104         switch (nlh->nlmsg_type) {
2105         case NLMSG_DONE:
2106             tswap_nlmsghdr(nlh);
2107             return 0;
2108         case NLMSG_NOOP:
2109             break;
2110         case NLMSG_ERROR:
2111         {
2112             struct nlmsgerr *e = NLMSG_DATA(nlh);
2113             e->error = tswap32(e->error);
2114             tswap_nlmsghdr(&e->msg);
2115             tswap_nlmsghdr(nlh);
2116             return 0;
2117         }
2118         default:
2119             ret = host_to_target_nlmsg(nlh);
2120             if (ret < 0) {
2121                 tswap_nlmsghdr(nlh);
2122                 return ret;
2123             }
2124             break;
2125         }
2126         tswap_nlmsghdr(nlh);
2127         len -= NLMSG_ALIGN(nlmsg_len);
2128         nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2129     }
2130     return 0;
2131 }
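
/*
 * Sketch of the walk above: a netlink reply of the form
 *
 *     [nlmsghdr|payload] [pad] [nlmsghdr|payload] ... [NLMSG_DONE]
 *
 * is handled one NLMSG_ALIGN()ed message at a time; the callback swaps the
 * type-specific payload, the header itself is swapped here, and the loop
 * stops early on NLMSG_DONE or NLMSG_ERROR.
 */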
2132 
2133 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2134                                               size_t len,
2135                                               abi_long (*target_to_host_nlmsg)
2136                                                        (struct nlmsghdr *))
2137 {
2138     int ret;
2139 
2140     while (len > sizeof(struct nlmsghdr)) {
2141         if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2142             tswap32(nlh->nlmsg_len) > len) {
2143             break;
2144         }
2145         tswap_nlmsghdr(nlh);
2146         switch (nlh->nlmsg_type) {
2147         case NLMSG_DONE:
2148             return 0;
2149         case NLMSG_NOOP:
2150             break;
2151         case NLMSG_ERROR:
2152         {
2153             struct nlmsgerr *e = NLMSG_DATA(nlh);
2154             e->error = tswap32(e->error);
2155             tswap_nlmsghdr(&e->msg);
2156             return 0;
2157         }
2158         default:
2159             ret = target_to_host_nlmsg(nlh);
2160             if (ret < 0) {
2161                 return ret;
2162             }
2163         }
2164         len -= NLMSG_ALIGN(nlh->nlmsg_len);
2165         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2166     }
2167     return 0;
2168 }
2169 
2170 #ifdef CONFIG_RTNETLINK
2171 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2172                                                size_t len, void *context,
2173                                                abi_long (*host_to_target_nlattr)
2174                                                         (struct nlattr *,
2175                                                          void *context))
2176 {
2177     unsigned short nla_len;
2178     abi_long ret;
2179 
2180     while (len > sizeof(struct nlattr)) {
2181         nla_len = nlattr->nla_len;
2182         if (nla_len < sizeof(struct nlattr) ||
2183             nla_len > len) {
2184             break;
2185         }
2186         ret = host_to_target_nlattr(nlattr, context);
2187         nlattr->nla_len = tswap16(nlattr->nla_len);
2188         nlattr->nla_type = tswap16(nlattr->nla_type);
2189         if (ret < 0) {
2190             return ret;
2191         }
2192         len -= NLA_ALIGN(nla_len);
2193         nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2194     }
2195     return 0;
2196 }
2197 
2198 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2199                                                size_t len,
2200                                                abi_long (*host_to_target_rtattr)
2201                                                         (struct rtattr *))
2202 {
2203     unsigned short rta_len;
2204     abi_long ret;
2205 
2206     while (len > sizeof(struct rtattr)) {
2207         rta_len = rtattr->rta_len;
2208         if (rta_len < sizeof(struct rtattr) ||
2209             rta_len > len) {
2210             break;
2211         }
2212         ret = host_to_target_rtattr(rtattr);
2213         rtattr->rta_len = tswap16(rtattr->rta_len);
2214         rtattr->rta_type = tswap16(rtattr->rta_type);
2215         if (ret < 0) {
2216             return ret;
2217         }
2218         len -= RTA_ALIGN(rta_len);
2219         rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2220     }
2221     return 0;
2222 }
2223 
2224 #define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
2225 
2226 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2227                                                   void *context)
2228 {
2229     uint16_t *u16;
2230     uint32_t *u32;
2231     uint64_t *u64;
2232 
2233     switch (nlattr->nla_type) {
2234     /* no data */
2235     case QEMU_IFLA_BR_FDB_FLUSH:
2236         break;
2237     /* binary */
2238     case QEMU_IFLA_BR_GROUP_ADDR:
2239         break;
2240     /* uint8_t */
2241     case QEMU_IFLA_BR_VLAN_FILTERING:
2242     case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2243     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2244     case QEMU_IFLA_BR_MCAST_ROUTER:
2245     case QEMU_IFLA_BR_MCAST_SNOOPING:
2246     case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2247     case QEMU_IFLA_BR_MCAST_QUERIER:
2248     case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2249     case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2250     case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2251     case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
2252     case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
2253     case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
2254     case QEMU_IFLA_BR_MCAST_MLD_VERSION:
2255         break;
2256     /* uint16_t */
2257     case QEMU_IFLA_BR_PRIORITY:
2258     case QEMU_IFLA_BR_VLAN_PROTOCOL:
2259     case QEMU_IFLA_BR_GROUP_FWD_MASK:
2260     case QEMU_IFLA_BR_ROOT_PORT:
2261     case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2262         u16 = NLA_DATA(nlattr);
2263         *u16 = tswap16(*u16);
2264         break;
2265     /* uint32_t */
2266     case QEMU_IFLA_BR_FORWARD_DELAY:
2267     case QEMU_IFLA_BR_HELLO_TIME:
2268     case QEMU_IFLA_BR_MAX_AGE:
2269     case QEMU_IFLA_BR_AGEING_TIME:
2270     case QEMU_IFLA_BR_STP_STATE:
2271     case QEMU_IFLA_BR_ROOT_PATH_COST:
2272     case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2273     case QEMU_IFLA_BR_MCAST_HASH_MAX:
2274     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2275     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2276         u32 = NLA_DATA(nlattr);
2277         *u32 = tswap32(*u32);
2278         break;
2279     /* uint64_t */
2280     case QEMU_IFLA_BR_HELLO_TIMER:
2281     case QEMU_IFLA_BR_TCN_TIMER:
2282     case QEMU_IFLA_BR_GC_TIMER:
2283     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2284     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2285     case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2286     case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2287     case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2288     case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2289     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2290         u64 = NLA_DATA(nlattr);
2291         *u64 = tswap64(*u64);
2292         break;
2293     /* ifla_bridge_id: uint8_t[] */
2294     case QEMU_IFLA_BR_ROOT_ID:
2295     case QEMU_IFLA_BR_BRIDGE_ID:
2296         break;
2297     default:
2298         gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2299         break;
2300     }
2301     return 0;
2302 }
2303 
2304 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2305                                                         void *context)
2306 {
2307     uint16_t *u16;
2308     uint32_t *u32;
2309     uint64_t *u64;
2310 
2311     switch (nlattr->nla_type) {
2312     /* uint8_t */
2313     case QEMU_IFLA_BRPORT_STATE:
2314     case QEMU_IFLA_BRPORT_MODE:
2315     case QEMU_IFLA_BRPORT_GUARD:
2316     case QEMU_IFLA_BRPORT_PROTECT:
2317     case QEMU_IFLA_BRPORT_FAST_LEAVE:
2318     case QEMU_IFLA_BRPORT_LEARNING:
2319     case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2320     case QEMU_IFLA_BRPORT_PROXYARP:
2321     case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2322     case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2323     case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2324     case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2325     case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2326     case QEMU_IFLA_BRPORT_MCAST_FLOOD:
2327     case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
2328     case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
2329     case QEMU_IFLA_BRPORT_BCAST_FLOOD:
2330     case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
2331         break;
2332     /* uint16_t */
2333     case QEMU_IFLA_BRPORT_PRIORITY:
2334     case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2335     case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2336     case QEMU_IFLA_BRPORT_ID:
2337     case QEMU_IFLA_BRPORT_NO:
2338     case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
2339         u16 = NLA_DATA(nlattr);
2340         *u16 = tswap16(*u16);
2341         break;
2342     /* uint32_t */
2343     case QEMU_IFLA_BRPORT_COST:
2344         u32 = NLA_DATA(nlattr);
2345         *u32 = tswap32(*u32);
2346         break;
2347     /* uint64_t */
2348     case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2349     case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2350     case QEMU_IFLA_BRPORT_HOLD_TIMER:
2351         u64 = NLA_DATA(nlattr);
2352         *u64 = tswap64(*u64);
2353         break;
2354     /* ifla_bridge_id: uint8_t[] */
2355     case QEMU_IFLA_BRPORT_ROOT_ID:
2356     case QEMU_IFLA_BRPORT_BRIDGE_ID:
2357         break;
2358     default:
2359         gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2360         break;
2361     }
2362     return 0;
2363 }
2364 
2365 static abi_long host_to_target_data_tun_nlattr(struct nlattr *nlattr,
2366                                                   void *context)
2367 {
2368     uint32_t *u32;
2369 
2370     switch (nlattr->nla_type) {
2371     /* uint8_t */
2372     case QEMU_IFLA_TUN_TYPE:
2373     case QEMU_IFLA_TUN_PI:
2374     case QEMU_IFLA_TUN_VNET_HDR:
2375     case QEMU_IFLA_TUN_PERSIST:
2376     case QEMU_IFLA_TUN_MULTI_QUEUE:
2377         break;
2378     /* uint32_t */
2379     case QEMU_IFLA_TUN_NUM_QUEUES:
2380     case QEMU_IFLA_TUN_NUM_DISABLED_QUEUES:
2381     case QEMU_IFLA_TUN_OWNER:
2382     case QEMU_IFLA_TUN_GROUP:
2383         u32 = NLA_DATA(nlattr);
2384         *u32 = tswap32(*u32);
2385         break;
2386     default:
2387         gemu_log("Unknown QEMU_IFLA_TUN type %d\n", nlattr->nla_type);
2388         break;
2389     }
2390     return 0;
2391 }
2392 
2393 struct linkinfo_context {
2394     int len;
2395     char *name;
2396     int slave_len;
2397     char *slave_name;
2398 };
2399 
2400 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2401                                                     void *context)
2402 {
2403     struct linkinfo_context *li_context = context;
2404 
2405     switch (nlattr->nla_type) {
2406     /* string */
2407     case QEMU_IFLA_INFO_KIND:
2408         li_context->name = NLA_DATA(nlattr);
2409         li_context->len = nlattr->nla_len - NLA_HDRLEN;
2410         break;
2411     case QEMU_IFLA_INFO_SLAVE_KIND:
2412         li_context->slave_name = NLA_DATA(nlattr);
2413         li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2414         break;
2415     /* stats */
2416     case QEMU_IFLA_INFO_XSTATS:
2417         /* FIXME: only used by CAN */
2418         break;
2419     /* nested */
2420     case QEMU_IFLA_INFO_DATA:
2421         if (strncmp(li_context->name, "bridge",
2422                     li_context->len) == 0) {
2423             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2424                                                   nlattr->nla_len,
2425                                                   NULL,
2426                                              host_to_target_data_bridge_nlattr);
2427         } else if (strncmp(li_context->name, "tun",
2428                     li_context->len) == 0) {
2429             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2430                                                   nlattr->nla_len,
2431                                                   NULL,
2432                                                 host_to_target_data_tun_nlattr);
2433         } else {
2434             gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2435         }
2436         break;
2437     case QEMU_IFLA_INFO_SLAVE_DATA:
2438         if (strncmp(li_context->slave_name, "bridge",
2439                     li_context->slave_len) == 0) {
2440             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2441                                                   nlattr->nla_len,
2442                                                   NULL,
2443                                        host_to_target_slave_data_bridge_nlattr);
2444         } else {
2445             gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2446                      li_context->slave_name);
2447         }
2448         break;
2449     default:
2450         gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2451         break;
2452     }
2453 
2454     return 0;
2455 }
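
/*
 * Example of the nesting handled above, for a bridge device:
 *
 *     QEMU_IFLA_LINKINFO
 *         QEMU_IFLA_INFO_KIND = "bridge"
 *         QEMU_IFLA_INFO_DATA = { QEMU_IFLA_BR_FORWARD_DELAY, ... }
 *
 * QEMU_IFLA_INFO_KIND is recorded in li_context first so that the nested
 * QEMU_IFLA_INFO_DATA attributes can be dispatched to
 * host_to_target_data_bridge_nlattr() for byte swapping.
 */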
2456 
2457 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2458                                                 void *context)
2459 {
2460     uint32_t *u32;
2461     int i;
2462 
2463     switch (nlattr->nla_type) {
2464     case QEMU_IFLA_INET_CONF:
2465         u32 = NLA_DATA(nlattr);
2466         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2467              i++) {
2468             u32[i] = tswap32(u32[i]);
2469         }
2470         break;
2471     default:
2472         gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2473     }
2474     return 0;
2475 }
2476 
2477 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2478                                                 void *context)
2479 {
2480     uint32_t *u32;
2481     uint64_t *u64;
2482     struct ifla_cacheinfo *ci;
2483     int i;
2484 
2485     switch (nlattr->nla_type) {
2486     /* binaries */
2487     case QEMU_IFLA_INET6_TOKEN:
2488         break;
2489     /* uint8_t */
2490     case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2491         break;
2492     /* uint32_t */
2493     case QEMU_IFLA_INET6_FLAGS:
2494         u32 = NLA_DATA(nlattr);
2495         *u32 = tswap32(*u32);
2496         break;
2497     /* uint32_t[] */
2498     case QEMU_IFLA_INET6_CONF:
2499         u32 = NLA_DATA(nlattr);
2500         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2501              i++) {
2502             u32[i] = tswap32(u32[i]);
2503         }
2504         break;
2505     /* ifla_cacheinfo */
2506     case QEMU_IFLA_INET6_CACHEINFO:
2507         ci = NLA_DATA(nlattr);
2508         ci->max_reasm_len = tswap32(ci->max_reasm_len);
2509         ci->tstamp = tswap32(ci->tstamp);
2510         ci->reachable_time = tswap32(ci->reachable_time);
2511         ci->retrans_time = tswap32(ci->retrans_time);
2512         break;
2513     /* uint64_t[] */
2514     case QEMU_IFLA_INET6_STATS:
2515     case QEMU_IFLA_INET6_ICMP6STATS:
2516         u64 = NLA_DATA(nlattr);
2517         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2518              i++) {
2519             u64[i] = tswap64(u64[i]);
2520         }
2521         break;
2522     default:
2523         gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2524     }
2525     return 0;
2526 }
2527 
2528 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2529                                                     void *context)
2530 {
2531     switch (nlattr->nla_type) {
2532     case AF_INET:
2533         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2534                                               NULL,
2535                                              host_to_target_data_inet_nlattr);
2536     case AF_INET6:
2537         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2538                                               NULL,
2539                                              host_to_target_data_inet6_nlattr);
2540     default:
2541         gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2542         break;
2543     }
2544     return 0;
2545 }
2546 
2547 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2548                                                void *context)
2549 {
2550     uint32_t *u32;
2551 
2552     switch (nlattr->nla_type) {
2553     /* uint8_t */
2554     case QEMU_IFLA_XDP_ATTACHED:
2555         break;
2556     /* uint32_t */
2557     case QEMU_IFLA_XDP_PROG_ID:
2558         u32 = NLA_DATA(nlattr);
2559         *u32 = tswap32(*u32);
2560         break;
2561     default:
2562         gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2563         break;
2564     }
2565     return 0;
2566 }
2567 
2568 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2569 {
2570     uint32_t *u32;
2571     struct rtnl_link_stats *st;
2572     struct rtnl_link_stats64 *st64;
2573     struct rtnl_link_ifmap *map;
2574     struct linkinfo_context li_context;
2575 
2576     switch (rtattr->rta_type) {
2577     /* binary stream */
2578     case QEMU_IFLA_ADDRESS:
2579     case QEMU_IFLA_BROADCAST:
2580     /* string */
2581     case QEMU_IFLA_IFNAME:
2582     case QEMU_IFLA_QDISC:
2583         break;
2584     /* uint8_t */
2585     case QEMU_IFLA_OPERSTATE:
2586     case QEMU_IFLA_LINKMODE:
2587     case QEMU_IFLA_CARRIER:
2588     case QEMU_IFLA_PROTO_DOWN:
2589         break;
2590     /* uint32_t */
2591     case QEMU_IFLA_MTU:
2592     case QEMU_IFLA_LINK:
2593     case QEMU_IFLA_WEIGHT:
2594     case QEMU_IFLA_TXQLEN:
2595     case QEMU_IFLA_CARRIER_CHANGES:
2596     case QEMU_IFLA_NUM_RX_QUEUES:
2597     case QEMU_IFLA_NUM_TX_QUEUES:
2598     case QEMU_IFLA_PROMISCUITY:
2599     case QEMU_IFLA_EXT_MASK:
2600     case QEMU_IFLA_LINK_NETNSID:
2601     case QEMU_IFLA_GROUP:
2602     case QEMU_IFLA_MASTER:
2603     case QEMU_IFLA_NUM_VF:
2604     case QEMU_IFLA_GSO_MAX_SEGS:
2605     case QEMU_IFLA_GSO_MAX_SIZE:
2606     case QEMU_IFLA_CARRIER_UP_COUNT:
2607     case QEMU_IFLA_CARRIER_DOWN_COUNT:
2608         u32 = RTA_DATA(rtattr);
2609         *u32 = tswap32(*u32);
2610         break;
2611     /* struct rtnl_link_stats */
2612     case QEMU_IFLA_STATS:
2613         st = RTA_DATA(rtattr);
2614         st->rx_packets = tswap32(st->rx_packets);
2615         st->tx_packets = tswap32(st->tx_packets);
2616         st->rx_bytes = tswap32(st->rx_bytes);
2617         st->tx_bytes = tswap32(st->tx_bytes);
2618         st->rx_errors = tswap32(st->rx_errors);
2619         st->tx_errors = tswap32(st->tx_errors);
2620         st->rx_dropped = tswap32(st->rx_dropped);
2621         st->tx_dropped = tswap32(st->tx_dropped);
2622         st->multicast = tswap32(st->multicast);
2623         st->collisions = tswap32(st->collisions);
2624 
2625         /* detailed rx_errors: */
2626         st->rx_length_errors = tswap32(st->rx_length_errors);
2627         st->rx_over_errors = tswap32(st->rx_over_errors);
2628         st->rx_crc_errors = tswap32(st->rx_crc_errors);
2629         st->rx_frame_errors = tswap32(st->rx_frame_errors);
2630         st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2631         st->rx_missed_errors = tswap32(st->rx_missed_errors);
2632 
2633         /* detailed tx_errors */
2634         st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2635         st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2636         st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2637         st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2638         st->tx_window_errors = tswap32(st->tx_window_errors);
2639 
2640         /* for cslip etc */
2641         st->rx_compressed = tswap32(st->rx_compressed);
2642         st->tx_compressed = tswap32(st->tx_compressed);
2643         break;
2644     /* struct rtnl_link_stats64 */
2645     case QEMU_IFLA_STATS64:
2646         st64 = RTA_DATA(rtattr);
2647         st64->rx_packets = tswap64(st64->rx_packets);
2648         st64->tx_packets = tswap64(st64->tx_packets);
2649         st64->rx_bytes = tswap64(st64->rx_bytes);
2650         st64->tx_bytes = tswap64(st64->tx_bytes);
2651         st64->rx_errors = tswap64(st64->rx_errors);
2652         st64->tx_errors = tswap64(st64->tx_errors);
2653         st64->rx_dropped = tswap64(st64->rx_dropped);
2654         st64->tx_dropped = tswap64(st64->tx_dropped);
2655         st64->multicast = tswap64(st64->multicast);
2656         st64->collisions = tswap64(st64->collisions);
2657 
2658         /* detailed rx_errors: */
2659         st64->rx_length_errors = tswap64(st64->rx_length_errors);
2660         st64->rx_over_errors = tswap64(st64->rx_over_errors);
2661         st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2662         st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2663         st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2664         st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2665 
2666         /* detailed tx_errors */
2667         st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2668         st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2669         st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2670         st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2671         st64->tx_window_errors = tswap64(st64->tx_window_errors);
2672 
2673         /* for cslip etc */
2674         st64->rx_compressed = tswap64(st64->rx_compressed);
2675         st64->tx_compressed = tswap64(st64->tx_compressed);
2676         break;
2677     /* struct rtnl_link_ifmap */
2678     case QEMU_IFLA_MAP:
2679         map = RTA_DATA(rtattr);
2680         map->mem_start = tswap64(map->mem_start);
2681         map->mem_end = tswap64(map->mem_end);
2682         map->base_addr = tswap64(map->base_addr);
2683         map->irq = tswap16(map->irq);
2684         break;
2685     /* nested */
2686     case QEMU_IFLA_LINKINFO:
2687         memset(&li_context, 0, sizeof(li_context));
2688         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2689                                               &li_context,
2690                                            host_to_target_data_linkinfo_nlattr);
2691     case QEMU_IFLA_AF_SPEC:
2692         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2693                                               NULL,
2694                                              host_to_target_data_spec_nlattr);
2695     case QEMU_IFLA_XDP:
2696         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2697                                               NULL,
2698                                                 host_to_target_data_xdp_nlattr);
2699     default:
2700         gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2701         break;
2702     }
2703     return 0;
2704 }
2705 
2706 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2707 {
2708     uint32_t *u32;
2709     struct ifa_cacheinfo *ci;
2710 
2711     switch (rtattr->rta_type) {
2712     /* binary: depends on family type */
2713     case IFA_ADDRESS:
2714     case IFA_LOCAL:
2715         break;
2716     /* string */
2717     case IFA_LABEL:
2718         break;
2719     /* u32 */
2720     case IFA_FLAGS:
2721     case IFA_BROADCAST:
2722         u32 = RTA_DATA(rtattr);
2723         *u32 = tswap32(*u32);
2724         break;
2725     /* struct ifa_cacheinfo */
2726     case IFA_CACHEINFO:
2727         ci = RTA_DATA(rtattr);
2728         ci->ifa_prefered = tswap32(ci->ifa_prefered);
2729         ci->ifa_valid = tswap32(ci->ifa_valid);
2730         ci->cstamp = tswap32(ci->cstamp);
2731         ci->tstamp = tswap32(ci->tstamp);
2732         break;
2733     default:
2734         gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2735         break;
2736     }
2737     return 0;
2738 }
2739 
2740 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2741 {
2742     uint32_t *u32;
2743     struct rta_cacheinfo *ci;
2744 
2745     switch (rtattr->rta_type) {
2746     /* binary: depends on family type */
2747     case QEMU_RTA_GATEWAY:
2748     case QEMU_RTA_DST:
2749     case QEMU_RTA_PREFSRC:
2750         break;
2751     /* u8 */
2752     case QEMU_RTA_PREF:
2753         break;
2754     /* u32 */
2755     case QEMU_RTA_PRIORITY:
2756     case QEMU_RTA_TABLE:
2757     case QEMU_RTA_OIF:
2758         u32 = RTA_DATA(rtattr);
2759         *u32 = tswap32(*u32);
2760         break;
2761     /* struct rta_cacheinfo */
2762     case QEMU_RTA_CACHEINFO:
2763         ci = RTA_DATA(rtattr);
2764         ci->rta_clntref = tswap32(ci->rta_clntref);
2765         ci->rta_lastuse = tswap32(ci->rta_lastuse);
2766         ci->rta_expires = tswap32(ci->rta_expires);
2767         ci->rta_error = tswap32(ci->rta_error);
2768         ci->rta_used = tswap32(ci->rta_used);
2769 #if defined(RTNETLINK_HAVE_PEERINFO)
2770         ci->rta_id = tswap32(ci->rta_id);
2771         ci->rta_ts = tswap32(ci->rta_ts);
2772         ci->rta_tsage = tswap32(ci->rta_tsage);
2773 #endif
2774         break;
2775     default:
2776         gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2777         break;
2778     }
2779     return 0;
2780 }
2781 
2782 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2783                                          uint32_t rtattr_len)
2784 {
2785     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2786                                           host_to_target_data_link_rtattr);
2787 }
2788 
2789 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2790                                          uint32_t rtattr_len)
2791 {
2792     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2793                                           host_to_target_data_addr_rtattr);
2794 }
2795 
2796 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2797                                          uint32_t rtattr_len)
2798 {
2799     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2800                                           host_to_target_data_route_rtattr);
2801 }
2802 
2803 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2804 {
2805     uint32_t nlmsg_len;
2806     struct ifinfomsg *ifi;
2807     struct ifaddrmsg *ifa;
2808     struct rtmsg *rtm;
2809 
2810     nlmsg_len = nlh->nlmsg_len;
2811     switch (nlh->nlmsg_type) {
2812     case RTM_NEWLINK:
2813     case RTM_DELLINK:
2814     case RTM_GETLINK:
2815         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2816             ifi = NLMSG_DATA(nlh);
2817             ifi->ifi_type = tswap16(ifi->ifi_type);
2818             ifi->ifi_index = tswap32(ifi->ifi_index);
2819             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2820             ifi->ifi_change = tswap32(ifi->ifi_change);
2821             host_to_target_link_rtattr(IFLA_RTA(ifi),
2822                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2823         }
2824         break;
2825     case RTM_NEWADDR:
2826     case RTM_DELADDR:
2827     case RTM_GETADDR:
2828         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2829             ifa = NLMSG_DATA(nlh);
2830             ifa->ifa_index = tswap32(ifa->ifa_index);
2831             host_to_target_addr_rtattr(IFA_RTA(ifa),
2832                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2833         }
2834         break;
2835     case RTM_NEWROUTE:
2836     case RTM_DELROUTE:
2837     case RTM_GETROUTE:
2838         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2839             rtm = NLMSG_DATA(nlh);
2840             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2841             host_to_target_route_rtattr(RTM_RTA(rtm),
2842                                         nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2843         }
2844         break;
2845     default:
2846         return -TARGET_EINVAL;
2847     }
2848     return 0;
2849 }
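
/*
 * Example of the dispatch above: an RTM_NEWLINK message is laid out as
 *
 *     nlmsghdr | ifinfomsg | IFLA_* rtattrs
 *
 * so once the fixed ifinfomsg fields are swapped, the remaining
 * nlmsg_len - NLMSG_LENGTH(sizeof(struct ifinfomsg)) bytes are handed to
 * host_to_target_link_rtattr() for per-attribute conversion.
 */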
2850 
2851 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2852                                                   size_t len)
2853 {
2854     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2855 }
2856 
2857 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2858                                                size_t len,
2859                                                abi_long (*target_to_host_rtattr)
2860                                                         (struct rtattr *))
2861 {
2862     abi_long ret;
2863 
2864     while (len >= sizeof(struct rtattr)) {
2865         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2866             tswap16(rtattr->rta_len) > len) {
2867             break;
2868         }
2869         rtattr->rta_len = tswap16(rtattr->rta_len);
2870         rtattr->rta_type = tswap16(rtattr->rta_type);
2871         ret = target_to_host_rtattr(rtattr);
2872         if (ret < 0) {
2873             return ret;
2874         }
2875         len -= RTA_ALIGN(rtattr->rta_len);
2876         rtattr = (struct rtattr *)(((char *)rtattr) +
2877                  RTA_ALIGN(rtattr->rta_len));
2878     }
2879     return 0;
2880 }
2881 
2882 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2883 {
2884     switch (rtattr->rta_type) {
2885     default:
2886         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2887         break;
2888     }
2889     return 0;
2890 }
2891 
2892 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2893 {
2894     switch (rtattr->rta_type) {
2895     /* binary: depends on family type */
2896     case IFA_LOCAL:
2897     case IFA_ADDRESS:
2898         break;
2899     default:
2900         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2901         break;
2902     }
2903     return 0;
2904 }
2905 
2906 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2907 {
2908     uint32_t *u32;
2909     switch (rtattr->rta_type) {
2910     /* binary: depends on family type */
2911     case QEMU_RTA_DST:
2912     case QEMU_RTA_SRC:
2913     case QEMU_RTA_GATEWAY:
2914         break;
2915     /* u32 */
2916     case QEMU_RTA_PRIORITY:
2917     case QEMU_RTA_OIF:
2918         u32 = RTA_DATA(rtattr);
2919         *u32 = tswap32(*u32);
2920         break;
2921     default:
2922         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2923         break;
2924     }
2925     return 0;
2926 }
2927 
2928 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2929                                        uint32_t rtattr_len)
2930 {
2931     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2932                                    target_to_host_data_link_rtattr);
2933 }
2934 
2935 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2936                                      uint32_t rtattr_len)
2937 {
2938     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2939                                    target_to_host_data_addr_rtattr);
2940 }
2941 
2942 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2943                                      uint32_t rtattr_len)
2944 {
2945     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2946                                    target_to_host_data_route_rtattr);
2947 }
2948 
2949 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2950 {
2951     struct ifinfomsg *ifi;
2952     struct ifaddrmsg *ifa;
2953     struct rtmsg *rtm;
2954 
2955     switch (nlh->nlmsg_type) {
2956     case RTM_GETLINK:
2957         break;
2958     case RTM_NEWLINK:
2959     case RTM_DELLINK:
2960         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2961             ifi = NLMSG_DATA(nlh);
2962             ifi->ifi_type = tswap16(ifi->ifi_type);
2963             ifi->ifi_index = tswap32(ifi->ifi_index);
2964             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2965             ifi->ifi_change = tswap32(ifi->ifi_change);
2966             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2967                                        NLMSG_LENGTH(sizeof(*ifi)));
2968         }
2969         break;
2970     case RTM_GETADDR:
2971     case RTM_NEWADDR:
2972     case RTM_DELADDR:
2973         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2974             ifa = NLMSG_DATA(nlh);
2975             ifa->ifa_index = tswap32(ifa->ifa_index);
2976             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2977                                        NLMSG_LENGTH(sizeof(*ifa)));
2978         }
2979         break;
2980     case RTM_GETROUTE:
2981         break;
2982     case RTM_NEWROUTE:
2983     case RTM_DELROUTE:
2984         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2985             rtm = NLMSG_DATA(nlh);
2986             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2987             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2988                                         NLMSG_LENGTH(sizeof(*rtm)));
2989         }
2990         break;
2991     default:
2992         return -TARGET_EOPNOTSUPP;
2993     }
2994     return 0;
2995 }
2996 
2997 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2998 {
2999     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
3000 }
3001 #endif /* CONFIG_RTNETLINK */
3002 
3003 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
3004 {
3005     switch (nlh->nlmsg_type) {
3006     default:
3007         gemu_log("Unknown host audit message type %d\n",
3008                  nlh->nlmsg_type);
3009         return -TARGET_EINVAL;
3010     }
3011     return 0;
3012 }
3013 
3014 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
3015                                                   size_t len)
3016 {
3017     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
3018 }
3019 
3020 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
3021 {
3022     switch (nlh->nlmsg_type) {
3023     case AUDIT_USER:
3024     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
3025     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
3026         break;
3027     default:
3028         gemu_log("Unknown target audit message type %d\n",
3029                  nlh->nlmsg_type);
3030         return -TARGET_EINVAL;
3031     }
3032 
3033     return 0;
3034 }
3035 
3036 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
3037 {
3038     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
3039 }
3040 
3041 /* do_setsockopt() must return target values and target errnos. */
3042 static abi_long do_setsockopt(int sockfd, int level, int optname,
3043                               abi_ulong optval_addr, socklen_t optlen)
3044 {
3045     abi_long ret;
3046     int val;
3047     struct ip_mreqn *ip_mreq;
3048     struct ip_mreq_source *ip_mreq_source;
3049 
3050     switch(level) {
3051     case SOL_TCP:
3052         /* TCP options all take an 'int' value.  */
3053         if (optlen < sizeof(uint32_t))
3054             return -TARGET_EINVAL;
3055 
3056         if (get_user_u32(val, optval_addr))
3057             return -TARGET_EFAULT;
3058         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
3059         break;
3060     case SOL_IP:
3061         switch(optname) {
3062         case IP_TOS:
3063         case IP_TTL:
3064         case IP_HDRINCL:
3065         case IP_ROUTER_ALERT:
3066         case IP_RECVOPTS:
3067         case IP_RETOPTS:
3068         case IP_PKTINFO:
3069         case IP_MTU_DISCOVER:
3070         case IP_RECVERR:
3071         case IP_RECVTTL:
3072         case IP_RECVTOS:
3073 #ifdef IP_FREEBIND
3074         case IP_FREEBIND:
3075 #endif
3076         case IP_MULTICAST_TTL:
3077         case IP_MULTICAST_LOOP:
3078             val = 0;
3079             if (optlen >= sizeof(uint32_t)) {
3080                 if (get_user_u32(val, optval_addr))
3081                     return -TARGET_EFAULT;
3082             } else if (optlen >= 1) {
3083                 if (get_user_u8(val, optval_addr))
3084                     return -TARGET_EFAULT;
3085             }
3086             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
3087             break;
3088         case IP_ADD_MEMBERSHIP:
3089         case IP_DROP_MEMBERSHIP:
3090             if (optlen < sizeof (struct target_ip_mreq) ||
3091                 optlen > sizeof (struct target_ip_mreqn))
3092                 return -TARGET_EINVAL;
3093 
3094             ip_mreq = (struct ip_mreqn *) alloca(optlen);
3095             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
3096             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
3097             break;
3098 
3099         case IP_BLOCK_SOURCE:
3100         case IP_UNBLOCK_SOURCE:
3101         case IP_ADD_SOURCE_MEMBERSHIP:
3102         case IP_DROP_SOURCE_MEMBERSHIP:
3103             if (optlen != sizeof (struct target_ip_mreq_source))
3104                 return -TARGET_EINVAL;
3105 
3106             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                 if (!ip_mreq_source) {
                     return -TARGET_EFAULT;
                 }
3107             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3108             unlock_user(ip_mreq_source, optval_addr, 0);
3109             break;
3110 
3111         default:
3112             goto unimplemented;
3113         }
3114         break;
3115     case SOL_IPV6:
3116         switch (optname) {
3117         case IPV6_MTU_DISCOVER:
3118         case IPV6_MTU:
3119         case IPV6_V6ONLY:
3120         case IPV6_RECVPKTINFO:
3121         case IPV6_UNICAST_HOPS:
3122         case IPV6_MULTICAST_HOPS:
3123         case IPV6_MULTICAST_LOOP:
3124         case IPV6_RECVERR:
3125         case IPV6_RECVHOPLIMIT:
3126         case IPV6_2292HOPLIMIT:
3127         case IPV6_CHECKSUM:
3128             val = 0;
3129             if (optlen < sizeof(uint32_t)) {
3130                 return -TARGET_EINVAL;
3131             }
3132             if (get_user_u32(val, optval_addr)) {
3133                 return -TARGET_EFAULT;
3134             }
3135             ret = get_errno(setsockopt(sockfd, level, optname,
3136                                        &val, sizeof(val)));
3137             break;
3138         case IPV6_PKTINFO:
3139         {
3140             struct in6_pktinfo pki;
3141 
3142             if (optlen < sizeof(pki)) {
3143                 return -TARGET_EINVAL;
3144             }
3145 
3146             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3147                 return -TARGET_EFAULT;
3148             }
3149 
3150             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3151 
3152             ret = get_errno(setsockopt(sockfd, level, optname,
3153                                        &pki, sizeof(pki)));
3154             break;
3155         }
3156         default:
3157             goto unimplemented;
3158         }
3159         break;
3160     case SOL_ICMPV6:
3161         switch (optname) {
3162         case ICMPV6_FILTER:
3163         {
3164             struct icmp6_filter icmp6f;
3165 
3166             if (optlen > sizeof(icmp6f)) {
3167                 optlen = sizeof(icmp6f);
3168             }
3169 
3170             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3171                 return -TARGET_EFAULT;
3172             }
3173 
3174             for (val = 0; val < 8; val++) {
3175                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3176             }
3177 
3178             ret = get_errno(setsockopt(sockfd, level, optname,
3179                                        &icmp6f, optlen));
3180             break;
3181         }
3182         default:
3183             goto unimplemented;
3184         }
3185         break;
3186     case SOL_RAW:
3187         switch (optname) {
3188         case ICMP_FILTER:
3189         case IPV6_CHECKSUM:
3190             /* These take a u32 value. */
3191             if (optlen < sizeof(uint32_t)) {
3192                 return -TARGET_EINVAL;
3193             }
3194 
3195             if (get_user_u32(val, optval_addr)) {
3196                 return -TARGET_EFAULT;
3197             }
3198             ret = get_errno(setsockopt(sockfd, level, optname,
3199                                        &val, sizeof(val)));
3200             break;
3201 
3202         default:
3203             goto unimplemented;
3204         }
3205         break;
3206     case TARGET_SOL_SOCKET:
3207         switch (optname) {
3208         case TARGET_SO_RCVTIMEO:
3209         {
3210                 struct timeval tv;
3211 
3212                 optname = SO_RCVTIMEO;
3213 
3214 set_timeout:
3215                 if (optlen != sizeof(struct target_timeval)) {
3216                     return -TARGET_EINVAL;
3217                 }
3218 
3219                 if (copy_from_user_timeval(&tv, optval_addr)) {
3220                     return -TARGET_EFAULT;
3221                 }
3222 
3223                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3224                                 &tv, sizeof(tv)));
3225                 return ret;
3226         }
3227         case TARGET_SO_SNDTIMEO:
3228                 optname = SO_SNDTIMEO;
3229                 goto set_timeout;
3230         case TARGET_SO_ATTACH_FILTER:
3231         {
3232                 struct target_sock_fprog *tfprog;
3233                 struct target_sock_filter *tfilter;
3234                 struct sock_fprog fprog;
3235                 struct sock_filter *filter;
3236                 int i;
3237 
3238                 if (optlen != sizeof(*tfprog)) {
3239                     return -TARGET_EINVAL;
3240                 }
3241                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3242                     return -TARGET_EFAULT;
3243                 }
3244                 if (!lock_user_struct(VERIFY_READ, tfilter,
3245                                       tswapal(tfprog->filter), 0)) {
3246                     unlock_user_struct(tfprog, optval_addr, 1);
3247                     return -TARGET_EFAULT;
3248                 }
3249 
3250                 fprog.len = tswap16(tfprog->len);
3251                 filter = g_try_new(struct sock_filter, fprog.len);
3252                 if (filter == NULL) {
3253                     unlock_user_struct(tfilter, tfprog->filter, 1);
3254                     unlock_user_struct(tfprog, optval_addr, 1);
3255                     return -TARGET_ENOMEM;
3256                 }
3257                 for (i = 0; i < fprog.len; i++) {
3258                     filter[i].code = tswap16(tfilter[i].code);
3259                     filter[i].jt = tfilter[i].jt;
3260                     filter[i].jf = tfilter[i].jf;
3261                     filter[i].k = tswap32(tfilter[i].k);
3262                 }
3263                 fprog.filter = filter;
3264 
3265                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3266                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3267                 g_free(filter);
3268 
3269                 unlock_user_struct(tfilter, tfprog->filter, 1);
3270                 unlock_user_struct(tfprog, optval_addr, 1);
3271                 return ret;
3272         }
3273         case TARGET_SO_BINDTODEVICE:
3274         {
3275                 char *dev_ifname, *addr_ifname;
3276 
3277                 if (optlen > IFNAMSIZ - 1) {
3278                     optlen = IFNAMSIZ - 1;
3279                 }
3280                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3281                 if (!dev_ifname) {
3282                     return -TARGET_EFAULT;
3283                 }
3284                 optname = SO_BINDTODEVICE;
3285                 addr_ifname = alloca(IFNAMSIZ);
3286                 memcpy(addr_ifname, dev_ifname, optlen);
3287                 addr_ifname[optlen] = 0;
3288                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3289                                            addr_ifname, optlen));
3290                 unlock_user(dev_ifname, optval_addr, 0);
3291                 return ret;
3292         }
3293             /* Options with 'int' argument.  */
3294         case TARGET_SO_DEBUG:
3295                 optname = SO_DEBUG;
3296                 break;
3297         case TARGET_SO_REUSEADDR:
3298                 optname = SO_REUSEADDR;
3299                 break;
3300         case TARGET_SO_TYPE:
3301                 optname = SO_TYPE;
3302                 break;
3303         case TARGET_SO_ERROR:
3304                 optname = SO_ERROR;
3305                 break;
3306         case TARGET_SO_DONTROUTE:
3307                 optname = SO_DONTROUTE;
3308                 break;
3309         case TARGET_SO_BROADCAST:
3310                 optname = SO_BROADCAST;
3311                 break;
3312         case TARGET_SO_SNDBUF:
3313                 optname = SO_SNDBUF;
3314                 break;
3315         case TARGET_SO_SNDBUFFORCE:
3316                 optname = SO_SNDBUFFORCE;
3317                 break;
3318         case TARGET_SO_RCVBUF:
3319                 optname = SO_RCVBUF;
3320                 break;
3321         case TARGET_SO_RCVBUFFORCE:
3322                 optname = SO_RCVBUFFORCE;
3323                 break;
3324         case TARGET_SO_KEEPALIVE:
3325                 optname = SO_KEEPALIVE;
3326                 break;
3327         case TARGET_SO_OOBINLINE:
3328                 optname = SO_OOBINLINE;
3329                 break;
3330         case TARGET_SO_NO_CHECK:
3331                 optname = SO_NO_CHECK;
3332                 break;
3333         case TARGET_SO_PRIORITY:
3334                 optname = SO_PRIORITY;
3335                 break;
3336 #ifdef SO_BSDCOMPAT
3337         case TARGET_SO_BSDCOMPAT:
3338                 optname = SO_BSDCOMPAT;
3339                 break;
3340 #endif
3341         case TARGET_SO_PASSCRED:
3342                 optname = SO_PASSCRED;
3343                 break;
3344         case TARGET_SO_PASSSEC:
3345                 optname = SO_PASSSEC;
3346                 break;
3347         case TARGET_SO_TIMESTAMP:
3348                 optname = SO_TIMESTAMP;
3349                 break;
3350         case TARGET_SO_RCVLOWAT:
3351                 optname = SO_RCVLOWAT;
3352                 break;
3353         default:
3354             goto unimplemented;
3355         }
3356         if (optlen < sizeof(uint32_t))
3357             return -TARGET_EINVAL;
3358 
3359         if (get_user_u32(val, optval_addr))
3360             return -TARGET_EFAULT;
3361         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3362         break;
3363     default:
3364     unimplemented:
3365         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3366         ret = -TARGET_ENOPROTOOPT;
3367     }
3368     return ret;
3369 }
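     /* Example of the conversion performed above: a guest
      *
      *     struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
      *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
      *
      * arrives here as TARGET_SOL_SOCKET/TARGET_SO_RCVTIMEO with a guest
      * pointer in optval_addr; copy_from_user_timeval() rebuilds a host
      * struct timeval from the target layout before the real setsockopt()
      * is issued with the host SO_RCVTIMEO option.
      */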
3370 
3371 /* do_getsockopt() Must return target values and target errnos. */
3372 static abi_long do_getsockopt(int sockfd, int level, int optname,
3373                               abi_ulong optval_addr, abi_ulong optlen)
3374 {
3375     abi_long ret;
3376     int len, val;
3377     socklen_t lv;
3378 
3379     switch(level) {
3380     case TARGET_SOL_SOCKET:
3381         level = SOL_SOCKET;
3382         switch (optname) {
3383         /* These don't just return a single integer */
3384         case TARGET_SO_LINGER:
3385         case TARGET_SO_RCVTIMEO:
3386         case TARGET_SO_SNDTIMEO:
3387         case TARGET_SO_PEERNAME:
3388             goto unimplemented;
3389         case TARGET_SO_PEERCRED: {
3390             struct ucred cr;
3391             socklen_t crlen;
3392             struct target_ucred *tcr;
3393 
3394             if (get_user_u32(len, optlen)) {
3395                 return -TARGET_EFAULT;
3396             }
3397             if (len < 0) {
3398                 return -TARGET_EINVAL;
3399             }
3400 
3401             crlen = sizeof(cr);
3402             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3403                                        &cr, &crlen));
3404             if (ret < 0) {
3405                 return ret;
3406             }
3407             if (len > crlen) {
3408                 len = crlen;
3409             }
3410             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3411                 return -TARGET_EFAULT;
3412             }
3413             __put_user(cr.pid, &tcr->pid);
3414             __put_user(cr.uid, &tcr->uid);
3415             __put_user(cr.gid, &tcr->gid);
3416             unlock_user_struct(tcr, optval_addr, 1);
3417             if (put_user_u32(len, optlen)) {
3418                 return -TARGET_EFAULT;
3419             }
3420             break;
3421         }
3422         /* Options with 'int' argument.  */
3423         case TARGET_SO_DEBUG:
3424             optname = SO_DEBUG;
3425             goto int_case;
3426         case TARGET_SO_REUSEADDR:
3427             optname = SO_REUSEADDR;
3428             goto int_case;
3429         case TARGET_SO_TYPE:
3430             optname = SO_TYPE;
3431             goto int_case;
3432         case TARGET_SO_ERROR:
3433             optname = SO_ERROR;
3434             goto int_case;
3435         case TARGET_SO_DONTROUTE:
3436             optname = SO_DONTROUTE;
3437             goto int_case;
3438         case TARGET_SO_BROADCAST:
3439             optname = SO_BROADCAST;
3440             goto int_case;
3441         case TARGET_SO_SNDBUF:
3442             optname = SO_SNDBUF;
3443             goto int_case;
3444         case TARGET_SO_RCVBUF:
3445             optname = SO_RCVBUF;
3446             goto int_case;
3447         case TARGET_SO_KEEPALIVE:
3448             optname = SO_KEEPALIVE;
3449             goto int_case;
3450         case TARGET_SO_OOBINLINE:
3451             optname = SO_OOBINLINE;
3452             goto int_case;
3453         case TARGET_SO_NO_CHECK:
3454             optname = SO_NO_CHECK;
3455             goto int_case;
3456         case TARGET_SO_PRIORITY:
3457             optname = SO_PRIORITY;
3458             goto int_case;
3459 #ifdef SO_BSDCOMPAT
3460         case TARGET_SO_BSDCOMPAT:
3461             optname = SO_BSDCOMPAT;
3462             goto int_case;
3463 #endif
3464         case TARGET_SO_PASSCRED:
3465             optname = SO_PASSCRED;
3466             goto int_case;
3467         case TARGET_SO_TIMESTAMP:
3468             optname = SO_TIMESTAMP;
3469             goto int_case;
3470         case TARGET_SO_RCVLOWAT:
3471             optname = SO_RCVLOWAT;
3472             goto int_case;
3473         case TARGET_SO_ACCEPTCONN:
3474             optname = SO_ACCEPTCONN;
3475             goto int_case;
3476         default:
3477             goto int_case;
3478         }
3479         break;
3480     case SOL_TCP:
3481         /* TCP options all take an 'int' value.  */
3482     int_case:
3483         if (get_user_u32(len, optlen))
3484             return -TARGET_EFAULT;
3485         if (len < 0)
3486             return -TARGET_EINVAL;
3487         lv = sizeof(lv);
3488         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3489         if (ret < 0)
3490             return ret;
3491         if (optname == SO_TYPE) {
3492             val = host_to_target_sock_type(val);
3493         }
3494         if (len > lv)
3495             len = lv;
3496         if (len == 4) {
3497             if (put_user_u32(val, optval_addr))
3498                 return -TARGET_EFAULT;
3499         } else {
3500             if (put_user_u8(val, optval_addr))
3501                 return -TARGET_EFAULT;
3502         }
3503         if (put_user_u32(len, optlen))
3504             return -TARGET_EFAULT;
3505         break;
3506     case SOL_IP:
3507         switch(optname) {
3508         case IP_TOS:
3509         case IP_TTL:
3510         case IP_HDRINCL:
3511         case IP_ROUTER_ALERT:
3512         case IP_RECVOPTS:
3513         case IP_RETOPTS:
3514         case IP_PKTINFO:
3515         case IP_MTU_DISCOVER:
3516         case IP_RECVERR:
3517         case IP_RECVTOS:
3518 #ifdef IP_FREEBIND
3519         case IP_FREEBIND:
3520 #endif
3521         case IP_MULTICAST_TTL:
3522         case IP_MULTICAST_LOOP:
3523             if (get_user_u32(len, optlen))
3524                 return -TARGET_EFAULT;
3525             if (len < 0)
3526                 return -TARGET_EINVAL;
3527             lv = sizeof(lv);
3528             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3529             if (ret < 0)
3530                 return ret;
3531             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3532                 len = 1;
3533                 if (put_user_u32(len, optlen)
3534                     || put_user_u8(val, optval_addr))
3535                     return -TARGET_EFAULT;
3536             } else {
3537                 if (len > sizeof(int))
3538                     len = sizeof(int);
3539                 if (put_user_u32(len, optlen)
3540                     || put_user_u32(val, optval_addr))
3541                     return -TARGET_EFAULT;
3542             }
3543             break;
3544         default:
3545             ret = -TARGET_ENOPROTOOPT;
3546             break;
3547         }
3548         break;
3549     default:
3550     unimplemented:
3551         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3552                  level, optname);
3553         ret = -TARGET_EOPNOTSUPP;
3554         break;
3555     }
3556     return ret;
3557 }
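     /* Note on the int_case path above: the option value is always fetched
      * from the host as an int; what is copied back to the guest depends on
      * the length the guest supplied - a 32-bit store when len is 4,
      * otherwise a single byte - and the (possibly clamped) length is
      * written back through optlen.
      */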
3558 
3559 /* Convert target low/high pair representing file offset into the host
3560  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3561  * as the kernel doesn't handle them either.
3562  */
3563 static void target_to_host_low_high(abi_ulong tlow,
3564                                     abi_ulong thigh,
3565                                     unsigned long *hlow,
3566                                     unsigned long *hhigh)
3567 {
3568     uint64_t off = tlow |
3569         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3570         TARGET_LONG_BITS / 2;
3571 
3572     *hlow = off;
3573     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3574 }
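     /* Worked example: with a 32-bit target and a 64-bit host,
      * tlow = 0x00001000 and thigh = 0x00000001 combine into
      * off = 0x0000000100001000, so *hlow receives the whole offset and
      * *hhigh ends up 0.  The shifts are split into two half-width steps
      * because shifting a value by the full width of its type (which a
      * single shift by TARGET_LONG_BITS or HOST_LONG_BITS could be) is
      * undefined behaviour in C.
      */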
3575 
3576 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3577                                 abi_ulong count, int copy)
3578 {
3579     struct target_iovec *target_vec;
3580     struct iovec *vec;
3581     abi_ulong total_len, max_len;
3582     int i;
3583     int err = 0;
3584     bool bad_address = false;
3585 
3586     if (count == 0) {
3587         errno = 0;
3588         return NULL;
3589     }
3590     if (count > IOV_MAX) {
3591         errno = EINVAL;
3592         return NULL;
3593     }
3594 
3595     vec = g_try_new0(struct iovec, count);
3596     if (vec == NULL) {
3597         errno = ENOMEM;
3598         return NULL;
3599     }
3600 
3601     target_vec = lock_user(VERIFY_READ, target_addr,
3602                            count * sizeof(struct target_iovec), 1);
3603     if (target_vec == NULL) {
3604         err = EFAULT;
3605         goto fail2;
3606     }
3607 
3608     /* ??? If host page size > target page size, this will result in a
3609        value larger than what we can actually support.  */
3610     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3611     total_len = 0;
3612 
3613     for (i = 0; i < count; i++) {
3614         abi_ulong base = tswapal(target_vec[i].iov_base);
3615         abi_long len = tswapal(target_vec[i].iov_len);
3616 
3617         if (len < 0) {
3618             err = EINVAL;
3619             goto fail;
3620         } else if (len == 0) {
3621             /* Zero length pointer is ignored.  */
3622             vec[i].iov_base = 0;
3623         } else {
3624             vec[i].iov_base = lock_user(type, base, len, copy);
3625             /* If the first buffer pointer is bad, this is a fault.  But
3626              * subsequent bad buffers will result in a partial write; this
3627              * is realized by filling the vector with null pointers and
3628              * zero lengths. */
3629             if (!vec[i].iov_base) {
3630                 if (i == 0) {
3631                     err = EFAULT;
3632                     goto fail;
3633                 } else {
3634                     bad_address = true;
3635                 }
3636             }
3637             if (bad_address) {
3638                 len = 0;
3639             }
3640             if (len > max_len - total_len) {
3641                 len = max_len - total_len;
3642             }
3643         }
3644         vec[i].iov_len = len;
3645         total_len += len;
3646     }
3647 
3648     unlock_user(target_vec, target_addr, 0);
3649     return vec;
3650 
3651  fail:
3652     while (--i >= 0) {
3653         if (tswapal(target_vec[i].iov_len) > 0) {
3654             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3655         }
3656     }
3657     unlock_user(target_vec, target_addr, 0);
3658  fail2:
3659     g_free(vec);
3660     errno = err;
3661     return NULL;
3662 }
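     /* lock_iovec() mirrors the kernel's readv()/writev() semantics: a bad
      * pointer in the very first element is a hard EFAULT, later bad
      * pointers merely truncate the transfer (their entries become
      * zero-length buffers), and the summed length is clamped to just
      * under 2 GiB, much like the kernel's per-call limit on vectored I/O.
      */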
3663 
3664 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3665                          abi_ulong count, int copy)
3666 {
3667     struct target_iovec *target_vec;
3668     int i;
3669 
3670     target_vec = lock_user(VERIFY_READ, target_addr,
3671                            count * sizeof(struct target_iovec), 1);
3672     if (target_vec) {
3673         for (i = 0; i < count; i++) {
3674             abi_ulong base = tswapal(target_vec[i].iov_base);
3675             abi_long len = tswapal(target_vec[i].iov_len);
3676             if (len < 0) {
3677                 break;
3678             }
3679             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3680         }
3681         unlock_user(target_vec, target_addr, 0);
3682     }
3683 
3684     g_free(vec);
3685 }
3686 
3687 static inline int target_to_host_sock_type(int *type)
3688 {
3689     int host_type = 0;
3690     int target_type = *type;
3691 
3692     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3693     case TARGET_SOCK_DGRAM:
3694         host_type = SOCK_DGRAM;
3695         break;
3696     case TARGET_SOCK_STREAM:
3697         host_type = SOCK_STREAM;
3698         break;
3699     default:
3700         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3701         break;
3702     }
3703     if (target_type & TARGET_SOCK_CLOEXEC) {
3704 #if defined(SOCK_CLOEXEC)
3705         host_type |= SOCK_CLOEXEC;
3706 #else
3707         return -TARGET_EINVAL;
3708 #endif
3709     }
3710     if (target_type & TARGET_SOCK_NONBLOCK) {
3711 #if defined(SOCK_NONBLOCK)
3712         host_type |= SOCK_NONBLOCK;
3713 #elif !defined(O_NONBLOCK)
3714         return -TARGET_EINVAL;
3715 #endif
3716     }
3717     *type = host_type;
3718     return 0;
3719 }
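     /* Example: a guest socket(AF_INET, SOCK_STREAM | TARGET_SOCK_NONBLOCK, 0)
      * becomes SOCK_STREAM | SOCK_NONBLOCK on hosts that define
      * SOCK_NONBLOCK.  On hosts that only have O_NONBLOCK the flag is left
      * out here and applied after socket creation by sock_flags_fixup()
      * below; if neither is available the call fails with -TARGET_EINVAL.
      */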
3720 
3721 /* Try to emulate socket type flags after socket creation.  */
3722 static int sock_flags_fixup(int fd, int target_type)
3723 {
3724 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3725     if (target_type & TARGET_SOCK_NONBLOCK) {
3726         int flags = fcntl(fd, F_GETFL);
3727         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3728             close(fd);
3729             return -TARGET_EINVAL;
3730         }
3731     }
3732 #endif
3733     return fd;
3734 }
3735 
3736 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3737                                                abi_ulong target_addr,
3738                                                socklen_t len)
3739 {
3740     struct sockaddr *addr = host_addr;
3741     struct target_sockaddr *target_saddr;
3742 
3743     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3744     if (!target_saddr) {
3745         return -TARGET_EFAULT;
3746     }
3747 
3748     memcpy(addr, target_saddr, len);
3749     addr->sa_family = tswap16(target_saddr->sa_family);
3750     /* spkt_protocol is big-endian */
3751 
3752     unlock_user(target_saddr, target_addr, 0);
3753     return 0;
3754 }
3755 
3756 static TargetFdTrans target_packet_trans = {
3757     .target_to_host_addr = packet_target_to_host_sockaddr,
3758 };
3759 
3760 #ifdef CONFIG_RTNETLINK
3761 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3762 {
3763     abi_long ret;
3764 
3765     ret = target_to_host_nlmsg_route(buf, len);
3766     if (ret < 0) {
3767         return ret;
3768     }
3769 
3770     return len;
3771 }
3772 
3773 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3774 {
3775     abi_long ret;
3776 
3777     ret = host_to_target_nlmsg_route(buf, len);
3778     if (ret < 0) {
3779         return ret;
3780     }
3781 
3782     return len;
3783 }
3784 
3785 static TargetFdTrans target_netlink_route_trans = {
3786     .target_to_host_data = netlink_route_target_to_host,
3787     .host_to_target_data = netlink_route_host_to_target,
3788 };
3789 #endif /* CONFIG_RTNETLINK */
3790 
3791 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3792 {
3793     abi_long ret;
3794 
3795     ret = target_to_host_nlmsg_audit(buf, len);
3796     if (ret < 0) {
3797         return ret;
3798     }
3799 
3800     return len;
3801 }
3802 
3803 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3804 {
3805     abi_long ret;
3806 
3807     ret = host_to_target_nlmsg_audit(buf, len);
3808     if (ret < 0) {
3809         return ret;
3810     }
3811 
3812     return len;
3813 }
3814 
3815 static TargetFdTrans target_netlink_audit_trans = {
3816     .target_to_host_data = netlink_audit_target_to_host,
3817     .host_to_target_data = netlink_audit_host_to_target,
3818 };
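     /* The TargetFdTrans descriptors above are attached to individual file
      * descriptors by do_socket() below via fd_trans_register(), so the
      * generic send/recv paths can pick the right payload converter -
      * sockaddr fixups for SOCK_PACKET sockets, nlmsg byte swapping for
      * NETLINK_ROUTE and NETLINK_AUDIT sockets - without knowing what kind
      * of socket a given fd is.
      */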
3819 
3820 /* do_socket() Must return target values and target errnos. */
3821 static abi_long do_socket(int domain, int type, int protocol)
3822 {
3823     int target_type = type;
3824     int ret;
3825 
3826     ret = target_to_host_sock_type(&type);
3827     if (ret) {
3828         return ret;
3829     }
3830 
3831     if (domain == PF_NETLINK && !(
3832 #ifdef CONFIG_RTNETLINK
3833          protocol == NETLINK_ROUTE ||
3834 #endif
3835          protocol == NETLINK_KOBJECT_UEVENT ||
3836          protocol == NETLINK_AUDIT)) {
3837         return -EPFNOSUPPORT;
3838     }
3839 
3840     if (domain == AF_PACKET ||
3841         (domain == AF_INET && type == SOCK_PACKET)) {
3842         protocol = tswap16(protocol);
3843     }
3844 
3845     ret = get_errno(socket(domain, type, protocol));
3846     if (ret >= 0) {
3847         ret = sock_flags_fixup(ret, target_type);
3848         if (type == SOCK_PACKET) {
3849             /* Handle an obsolete case:
3850              * if the socket type is SOCK_PACKET, it is bound by name.
3851              */
3852             fd_trans_register(ret, &target_packet_trans);
3853         } else if (domain == PF_NETLINK) {
3854             switch (protocol) {
3855 #ifdef CONFIG_RTNETLINK
3856             case NETLINK_ROUTE:
3857                 fd_trans_register(ret, &target_netlink_route_trans);
3858                 break;
3859 #endif
3860             case NETLINK_KOBJECT_UEVENT:
3861                 /* nothing to do: messages are strings */
3862                 break;
3863             case NETLINK_AUDIT:
3864                 fd_trans_register(ret, &target_netlink_audit_trans);
3865                 break;
3866             default:
3867                 g_assert_not_reached();
3868             }
3869         }
3870     }
3871     return ret;
3872 }
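     /* For instance, a guest socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE)
      * (with CONFIG_RTNETLINK available) has its type flags translated, is
      * checked against the short list of supported netlink protocols, and,
      * once the host socket() succeeds, gets target_netlink_route_trans
      * registered on the new fd so that later sendmsg()/recvmsg() traffic
      * is converted between target and host rtnetlink layouts.
      */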
3873 
3874 /* do_bind() Must return target values and target errnos. */
3875 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3876                         socklen_t addrlen)
3877 {
3878     void *addr;
3879     abi_long ret;
3880 
3881     if ((int)addrlen < 0) {
3882         return -TARGET_EINVAL;
3883     }
3884 
3885     addr = alloca(addrlen+1);
3886 
3887     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3888     if (ret)
3889         return ret;
3890 
3891     return get_errno(bind(sockfd, addr, addrlen));
3892 }
3893 
3894 /* do_connect() Must return target values and target errnos. */
3895 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3896                            socklen_t addrlen)
3897 {
3898     void *addr;
3899     abi_long ret;
3900 
3901     if ((int)addrlen < 0) {
3902         return -TARGET_EINVAL;
3903     }
3904 
3905     addr = alloca(addrlen+1);
3906 
3907     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3908     if (ret)
3909         return ret;
3910 
3911     return get_errno(safe_connect(sockfd, addr, addrlen));
3912 }
3913 
3914 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3915 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3916                                       int flags, int send)
3917 {
3918     abi_long ret, len;
3919     struct msghdr msg;
3920     abi_ulong count;
3921     struct iovec *vec;
3922     abi_ulong target_vec;
3923 
3924     if (msgp->msg_name) {
3925         msg.msg_namelen = tswap32(msgp->msg_namelen);
3926         msg.msg_name = alloca(msg.msg_namelen+1);
3927         ret = target_to_host_sockaddr(fd, msg.msg_name,
3928                                       tswapal(msgp->msg_name),
3929                                       msg.msg_namelen);
3930         if (ret == -TARGET_EFAULT) {
3931             /* For connected sockets msg_name and msg_namelen must
3932              * be ignored, so returning EFAULT immediately is wrong.
3933              * Instead, pass a bad msg_name to the host kernel, and
3934              * let it decide whether to return EFAULT or not.
3935              */
3936             msg.msg_name = (void *)-1;
3937         } else if (ret) {
3938             goto out2;
3939         }
3940     } else {
3941         msg.msg_name = NULL;
3942         msg.msg_namelen = 0;
3943     }
3944     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3945     msg.msg_control = alloca(msg.msg_controllen);
3946     memset(msg.msg_control, 0, msg.msg_controllen);
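         /* The control buffer above is sized at twice the target's
          * msg_controllen, presumably to leave room for host cmsg headers
          * and alignment that can be larger than the target's, so the
          * converted control data still fits.
          */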
3947 
3948     msg.msg_flags = tswap32(msgp->msg_flags);
3949 
3950     count = tswapal(msgp->msg_iovlen);
3951     target_vec = tswapal(msgp->msg_iov);
3952 
3953     if (count > IOV_MAX) {
3954         /* sendmsg/recvmsg returns a different errno for this condition than
3955          * readv/writev, so we must catch it here before lock_iovec() does.
3956          */
3957         ret = -TARGET_EMSGSIZE;
3958         goto out2;
3959     }
3960 
3961     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3962                      target_vec, count, send);
3963     if (vec == NULL) {
3964         ret = -host_to_target_errno(errno);
3965         goto out2;
3966     }
3967     msg.msg_iovlen = count;
3968     msg.msg_iov = vec;
3969 
3970     if (send) {
3971         if (fd_trans_target_to_host_data(fd)) {
3972             void *host_msg;
3973 
3974             host_msg = g_malloc(msg.msg_iov->iov_len);
3975             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3976             ret = fd_trans_target_to_host_data(fd)(host_msg,
3977                                                    msg.msg_iov->iov_len);
3978             if (ret >= 0) {
3979                 msg.msg_iov->iov_base = host_msg;
3980                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3981             }
3982             g_free(host_msg);
3983         } else {
3984             ret = target_to_host_cmsg(&msg, msgp);
3985             if (ret == 0) {
3986                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3987             }
3988         }
3989     } else {
3990         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3991         if (!is_error(ret)) {
3992             len = ret;
3993             if (fd_trans_host_to_target_data(fd)) {
3994                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3995                                                MIN(msg.msg_iov->iov_len, len));
3996             } else {
3997                 ret = host_to_target_cmsg(msgp, &msg);
3998             }
3999             if (!is_error(ret)) {
4000                 msgp->msg_namelen = tswap32(msg.msg_namelen);
4001                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
4002                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
4003                                     msg.msg_name, msg.msg_namelen);
4004                     if (ret) {
4005                         goto out;
4006                     }
4007                 }
4008 
4009                 ret = len;
4010             }
4011         }
4012     }
4013 
4014 out:
4015     unlock_iovec(vec, target_vec, count, !send);
4016 out2:
4017     return ret;
4018 }
4019 
4020 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
4021                                int flags, int send)
4022 {
4023     abi_long ret;
4024     struct target_msghdr *msgp;
4025 
4026     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
4027                           msgp,
4028                           target_msg,
4029                           send ? 1 : 0)) {
4030         return -TARGET_EFAULT;
4031     }
4032     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
4033     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
4034     return ret;
4035 }
4036 
4037 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
4038  * so it might not have this *mmsg-specific flag either.
4039  */
4040 #ifndef MSG_WAITFORONE
4041 #define MSG_WAITFORONE 0x10000
4042 #endif
4043 
4044 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
4045                                 unsigned int vlen, unsigned int flags,
4046                                 int send)
4047 {
4048     struct target_mmsghdr *mmsgp;
4049     abi_long ret = 0;
4050     int i;
4051 
4052     if (vlen > UIO_MAXIOV) {
4053         vlen = UIO_MAXIOV;
4054     }
4055 
4056     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
4057     if (!mmsgp) {
4058         return -TARGET_EFAULT;
4059     }
4060 
4061     for (i = 0; i < vlen; i++) {
4062         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
4063         if (is_error(ret)) {
4064             break;
4065         }
4066         mmsgp[i].msg_len = tswap32(ret);
4067         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
4068         if (flags & MSG_WAITFORONE) {
4069             flags |= MSG_DONTWAIT;
4070         }
4071     }
4072 
4073     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
4074 
4075     /* Return the number of datagrams sent or received, if any were
4076      * transferred at all; otherwise return the error.
4077      */
4078     if (i) {
4079         return i;
4080     }
4081     return ret;
4082 }
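     /* The MSG_WAITFORONE handling above matches recvmmsg(2): the first
      * datagram may block as usual, but once one has been received
      * MSG_DONTWAIT is OR-ed into the flags so the remaining iterations
      * return immediately instead of waiting for the whole vector to fill.
      */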
4083 
4084 /* do_accept4() Must return target values and target errnos. */
4085 static abi_long do_accept4(int fd, abi_ulong target_addr,
4086                            abi_ulong target_addrlen_addr, int flags)
4087 {
4088     socklen_t addrlen;
4089     void *addr;
4090     abi_long ret;
4091     int host_flags;
4092 
4093     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
4094 
4095     if (target_addr == 0) {
4096         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
4097     }
4098 
4099     /* Linux returns EINVAL if the addrlen pointer is invalid. */
4100     if (get_user_u32(addrlen, target_addrlen_addr))
4101         return -TARGET_EINVAL;
4102 
4103     if ((int)addrlen < 0) {
4104         return -TARGET_EINVAL;
4105     }
4106 
4107     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4108         return -TARGET_EINVAL;
4109 
4110     addr = alloca(addrlen);
4111 
4112     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4113     if (!is_error(ret)) {
4114         host_to_target_sockaddr(target_addr, addr, addrlen);
4115         if (put_user_u32(addrlen, target_addrlen_addr))
4116             ret = -TARGET_EFAULT;
4117     }
4118     return ret;
4119 }
4120 
4121 /* do_getpeername() Must return target values and target errnos. */
4122 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4123                                abi_ulong target_addrlen_addr)
4124 {
4125     socklen_t addrlen;
4126     void *addr;
4127     abi_long ret;
4128 
4129     if (get_user_u32(addrlen, target_addrlen_addr))
4130         return -TARGET_EFAULT;
4131 
4132     if ((int)addrlen < 0) {
4133         return -TARGET_EINVAL;
4134     }
4135 
4136     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4137         return -TARGET_EFAULT;
4138 
4139     addr = alloca(addrlen);
4140 
4141     ret = get_errno(getpeername(fd, addr, &addrlen));
4142     if (!is_error(ret)) {
4143         host_to_target_sockaddr(target_addr, addr, addrlen);
4144         if (put_user_u32(addrlen, target_addrlen_addr))
4145             ret = -TARGET_EFAULT;
4146     }
4147     return ret;
4148 }
4149 
4150 /* do_getsockname() Must return target values and target errnos. */
4151 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4152                                abi_ulong target_addrlen_addr)
4153 {
4154     socklen_t addrlen;
4155     void *addr;
4156     abi_long ret;
4157 
4158     if (get_user_u32(addrlen, target_addrlen_addr))
4159         return -TARGET_EFAULT;
4160 
4161     if ((int)addrlen < 0) {
4162         return -TARGET_EINVAL;
4163     }
4164 
4165     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4166         return -TARGET_EFAULT;
4167 
4168     addr = alloca(addrlen);
4169 
4170     ret = get_errno(getsockname(fd, addr, &addrlen));
4171     if (!is_error(ret)) {
4172         host_to_target_sockaddr(target_addr, addr, addrlen);
4173         if (put_user_u32(addrlen, target_addrlen_addr))
4174             ret = -TARGET_EFAULT;
4175     }
4176     return ret;
4177 }
4178 
4179 /* do_socketpair() Must return target values and target errnos. */
4180 static abi_long do_socketpair(int domain, int type, int protocol,
4181                               abi_ulong target_tab_addr)
4182 {
4183     int tab[2];
4184     abi_long ret;
4185 
4186     target_to_host_sock_type(&type);
4187 
4188     ret = get_errno(socketpair(domain, type, protocol, tab));
4189     if (!is_error(ret)) {
4190         if (put_user_s32(tab[0], target_tab_addr)
4191             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4192             ret = -TARGET_EFAULT;
4193     }
4194     return ret;
4195 }
4196 
4197 /* do_sendto() Must return target values and target errnos. */
4198 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4199                           abi_ulong target_addr, socklen_t addrlen)
4200 {
4201     void *addr;
4202     void *host_msg;
4203     void *copy_msg = NULL;
4204     abi_long ret;
4205 
4206     if ((int)addrlen < 0) {
4207         return -TARGET_EINVAL;
4208     }
4209 
4210     host_msg = lock_user(VERIFY_READ, msg, len, 1);
4211     if (!host_msg)
4212         return -TARGET_EFAULT;
4213     if (fd_trans_target_to_host_data(fd)) {
4214         copy_msg = host_msg;
4215         host_msg = g_malloc(len);
4216         memcpy(host_msg, copy_msg, len);
4217         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4218         if (ret < 0) {
4219             goto fail;
4220         }
4221     }
4222     if (target_addr) {
4223         addr = alloca(addrlen+1);
4224         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4225         if (ret) {
4226             goto fail;
4227         }
4228         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4229     } else {
4230         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4231     }
4232 fail:
4233     if (copy_msg) {
4234         g_free(host_msg);
4235         host_msg = copy_msg;
4236     }
4237     unlock_user(host_msg, msg, 0);
4238     return ret;
4239 }
4240 
4241 /* do_recvfrom() Must return target values and target errnos. */
4242 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4243                             abi_ulong target_addr,
4244                             abi_ulong target_addrlen)
4245 {
4246     socklen_t addrlen;
4247     void *addr;
4248     void *host_msg;
4249     abi_long ret;
4250 
4251     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4252     if (!host_msg)
4253         return -TARGET_EFAULT;
4254     if (target_addr) {
4255         if (get_user_u32(addrlen, target_addrlen)) {
4256             ret = -TARGET_EFAULT;
4257             goto fail;
4258         }
4259         if ((int)addrlen < 0) {
4260             ret = -TARGET_EINVAL;
4261             goto fail;
4262         }
4263         addr = alloca(addrlen);
4264         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4265                                       addr, &addrlen));
4266     } else {
4267         addr = NULL; /* To keep compiler quiet.  */
4268         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4269     }
4270     if (!is_error(ret)) {
4271         if (fd_trans_host_to_target_data(fd)) {
4272             abi_long trans;
4273             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
4274             if (is_error(trans)) {
4275                 ret = trans;
4276                 goto fail;
4277             }
4278         }
4279         if (target_addr) {
4280             host_to_target_sockaddr(target_addr, addr, addrlen);
4281             if (put_user_u32(addrlen, target_addrlen)) {
4282                 ret = -TARGET_EFAULT;
4283                 goto fail;
4284             }
4285         }
4286         unlock_user(host_msg, msg, len);
4287     } else {
4288 fail:
4289         unlock_user(host_msg, msg, 0);
4290     }
4291     return ret;
4292 }
4293 
4294 #ifdef TARGET_NR_socketcall
4295 /* do_socketcall() must return target values and target errnos. */
4296 static abi_long do_socketcall(int num, abi_ulong vptr)
4297 {
4298     static const unsigned nargs[] = { /* number of arguments per operation */
4299         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
4300         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
4301         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
4302         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
4303         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
4304         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4305         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4306         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
4307         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
4308         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
4309         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
4310         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
4311         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
4312         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4313         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4314         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
4315         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
4316         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
4317         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
4318         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
4319     };
4320     abi_long a[6]; /* max 6 args */
4321     unsigned i;
4322 
4323     /* check the range of the first argument num */
4324     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4325     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4326         return -TARGET_EINVAL;
4327     }
4328     /* ensure we have space for args */
4329     if (nargs[num] > ARRAY_SIZE(a)) {
4330         return -TARGET_EINVAL;
4331     }
4332     /* collect the arguments in a[] according to nargs[] */
4333     for (i = 0; i < nargs[num]; ++i) {
4334         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4335             return -TARGET_EFAULT;
4336         }
4337     }
4338     /* now when we have the args, invoke the appropriate underlying function */
4339     switch (num) {
4340     case TARGET_SYS_SOCKET: /* domain, type, protocol */
4341         return do_socket(a[0], a[1], a[2]);
4342     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4343         return do_bind(a[0], a[1], a[2]);
4344     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4345         return do_connect(a[0], a[1], a[2]);
4346     case TARGET_SYS_LISTEN: /* sockfd, backlog */
4347         return get_errno(listen(a[0], a[1]));
4348     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4349         return do_accept4(a[0], a[1], a[2], 0);
4350     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4351         return do_getsockname(a[0], a[1], a[2]);
4352     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4353         return do_getpeername(a[0], a[1], a[2]);
4354     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4355         return do_socketpair(a[0], a[1], a[2], a[3]);
4356     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4357         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4358     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4359         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4360     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4361         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4362     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4363         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4364     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4365         return get_errno(shutdown(a[0], a[1]));
4366     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4367         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4368     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4369         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4370     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4371         return do_sendrecvmsg(a[0], a[1], a[2], 1);
4372     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4373         return do_sendrecvmsg(a[0], a[1], a[2], 0);
4374     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4375         return do_accept4(a[0], a[1], a[2], a[3]);
4376     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4377         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4378     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4379         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4380     default:
4381         gemu_log("Unsupported socketcall: %d\n", num);
4382         return -TARGET_EINVAL;
4383     }
4384 }
4385 #endif
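     /* On targets that multiplex the socket API through socketcall (i386,
      * for example), a guest connect(fd, addr, addrlen) arrives here as
      *
      *     socketcall(TARGET_SYS_CONNECT, args)
      *
      * where args points at three abi_long arguments in guest memory; the
      * nargs[] table above says how many arguments to fetch before
      * dispatching to do_connect().
      */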
4386 
4387 #define N_SHM_REGIONS	32
4388 
4389 static struct shm_region {
4390     abi_ulong start;
4391     abi_ulong size;
4392     bool in_use;
4393 } shm_regions[N_SHM_REGIONS];
4394 
4395 #ifndef TARGET_SEMID64_DS
4396 /* asm-generic version of this struct */
4397 struct target_semid64_ds
4398 {
4399   struct target_ipc_perm sem_perm;
4400   abi_ulong sem_otime;
4401 #if TARGET_ABI_BITS == 32
4402   abi_ulong __unused1;
4403 #endif
4404   abi_ulong sem_ctime;
4405 #if TARGET_ABI_BITS == 32
4406   abi_ulong __unused2;
4407 #endif
4408   abi_ulong sem_nsems;
4409   abi_ulong __unused3;
4410   abi_ulong __unused4;
4411 };
4412 #endif
4413 
4414 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4415                                                abi_ulong target_addr)
4416 {
4417     struct target_ipc_perm *target_ip;
4418     struct target_semid64_ds *target_sd;
4419 
4420     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4421         return -TARGET_EFAULT;
4422     target_ip = &(target_sd->sem_perm);
4423     host_ip->__key = tswap32(target_ip->__key);
4424     host_ip->uid = tswap32(target_ip->uid);
4425     host_ip->gid = tswap32(target_ip->gid);
4426     host_ip->cuid = tswap32(target_ip->cuid);
4427     host_ip->cgid = tswap32(target_ip->cgid);
4428 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4429     host_ip->mode = tswap32(target_ip->mode);
4430 #else
4431     host_ip->mode = tswap16(target_ip->mode);
4432 #endif
4433 #if defined(TARGET_PPC)
4434     host_ip->__seq = tswap32(target_ip->__seq);
4435 #else
4436     host_ip->__seq = tswap16(target_ip->__seq);
4437 #endif
4438     unlock_user_struct(target_sd, target_addr, 0);
4439     return 0;
4440 }
4441 
4442 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4443                                                struct ipc_perm *host_ip)
4444 {
4445     struct target_ipc_perm *target_ip;
4446     struct target_semid64_ds *target_sd;
4447 
4448     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4449         return -TARGET_EFAULT;
4450     target_ip = &(target_sd->sem_perm);
4451     target_ip->__key = tswap32(host_ip->__key);
4452     target_ip->uid = tswap32(host_ip->uid);
4453     target_ip->gid = tswap32(host_ip->gid);
4454     target_ip->cuid = tswap32(host_ip->cuid);
4455     target_ip->cgid = tswap32(host_ip->cgid);
4456 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4457     target_ip->mode = tswap32(host_ip->mode);
4458 #else
4459     target_ip->mode = tswap16(host_ip->mode);
4460 #endif
4461 #if defined(TARGET_PPC)
4462     target_ip->__seq = tswap32(host_ip->__seq);
4463 #else
4464     target_ip->__seq = tswap16(host_ip->__seq);
4465 #endif
4466     unlock_user_struct(target_sd, target_addr, 1);
4467     return 0;
4468 }
4469 
4470 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4471                                                abi_ulong target_addr)
4472 {
4473     struct target_semid64_ds *target_sd;
4474 
4475     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4476         return -TARGET_EFAULT;
4477     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4478         return -TARGET_EFAULT;
4479     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4480     host_sd->sem_otime = tswapal(target_sd->sem_otime);
4481     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4482     unlock_user_struct(target_sd, target_addr, 0);
4483     return 0;
4484 }
4485 
4486 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4487                                                struct semid_ds *host_sd)
4488 {
4489     struct target_semid64_ds *target_sd;
4490 
4491     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4492         return -TARGET_EFAULT;
4493     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4494         return -TARGET_EFAULT;
4495     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4496     target_sd->sem_otime = tswapal(host_sd->sem_otime);
4497     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4498     unlock_user_struct(target_sd, target_addr, 1);
4499     return 0;
4500 }
4501 
4502 struct target_seminfo {
4503     int semmap;
4504     int semmni;
4505     int semmns;
4506     int semmnu;
4507     int semmsl;
4508     int semopm;
4509     int semume;
4510     int semusz;
4511     int semvmx;
4512     int semaem;
4513 };
4514 
4515 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4516                                               struct seminfo *host_seminfo)
4517 {
4518     struct target_seminfo *target_seminfo;
4519     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4520         return -TARGET_EFAULT;
4521     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4522     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4523     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4524     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4525     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4526     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4527     __put_user(host_seminfo->semume, &target_seminfo->semume);
4528     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4529     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4530     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4531     unlock_user_struct(target_seminfo, target_addr, 1);
4532     return 0;
4533 }
4534 
4535 union semun {
4536 	int val;
4537 	struct semid_ds *buf;
4538 	unsigned short *array;
4539 	struct seminfo *__buf;
4540 };
4541 
4542 union target_semun {
4543 	int val;
4544 	abi_ulong buf;
4545 	abi_ulong array;
4546 	abi_ulong __buf;
4547 };
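     /* In target_semun the buf/array/__buf members are guest addresses
      * (abi_ulong), not host pointers, so each semctl() variant below has
      * to translate them with the target_to_host_* and host_to_target_*
      * helpers around the host semctl() call.
      */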
4548 
4549 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4550                                                abi_ulong target_addr)
4551 {
4552     int nsems;
4553     unsigned short *array;
4554     union semun semun;
4555     struct semid_ds semid_ds;
4556     int i, ret;
4557 
4558     semun.buf = &semid_ds;
4559 
4560     ret = semctl(semid, 0, IPC_STAT, semun);
4561     if (ret == -1)
4562         return get_errno(ret);
4563 
4564     nsems = semid_ds.sem_nsems;
4565 
4566     *host_array = g_try_new(unsigned short, nsems);
4567     if (!*host_array) {
4568         return -TARGET_ENOMEM;
4569     }
4570     array = lock_user(VERIFY_READ, target_addr,
4571                       nsems*sizeof(unsigned short), 1);
4572     if (!array) {
4573         g_free(*host_array);
4574         return -TARGET_EFAULT;
4575     }
4576 
4577     for(i=0; i<nsems; i++) {
4578         __get_user((*host_array)[i], &array[i]);
4579     }
4580     unlock_user(array, target_addr, 0);
4581 
4582     return 0;
4583 }
4584 
4585 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4586                                                unsigned short **host_array)
4587 {
4588     int nsems;
4589     unsigned short *array;
4590     union semun semun;
4591     struct semid_ds semid_ds;
4592     int i, ret;
4593 
4594     semun.buf = &semid_ds;
4595 
4596     ret = semctl(semid, 0, IPC_STAT, semun);
4597     if (ret == -1)
4598         return get_errno(ret);
4599 
4600     nsems = semid_ds.sem_nsems;
4601 
4602     array = lock_user(VERIFY_WRITE, target_addr,
4603                       nsems*sizeof(unsigned short), 0);
4604     if (!array)
4605         return -TARGET_EFAULT;
4606 
4607     for(i=0; i<nsems; i++) {
4608         __put_user((*host_array)[i], &array[i]);
4609     }
4610     g_free(*host_array);
4611     unlock_user(array, target_addr, 1);
4612 
4613     return 0;
4614 }
4615 
4616 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4617                                  abi_ulong target_arg)
4618 {
4619     union target_semun target_su = { .buf = target_arg };
4620     union semun arg;
4621     struct semid_ds dsarg;
4622     unsigned short *array = NULL;
4623     struct seminfo seminfo;
4624     abi_long ret = -TARGET_EINVAL;
4625     abi_long err;
4626     cmd &= 0xff;
4627 
4628     switch( cmd ) {
4629 	case GETVAL:
4630 	case SETVAL:
4631             /* In 64 bit cross-endian situations, we will erroneously pick up
4632              * the wrong half of the union for the "val" element.  To rectify
4633              * this, the entire 8-byte structure is byteswapped, followed by
4634              * a swap of the 4 byte val field. In other cases, the data is
4635              * already in proper host byte order. */
4636 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4637 		target_su.buf = tswapal(target_su.buf);
4638 		arg.val = tswap32(target_su.val);
4639 	    } else {
4640 		arg.val = target_su.val;
4641 	    }
4642             ret = get_errno(semctl(semid, semnum, cmd, arg));
4643             break;
4644 	case GETALL:
4645 	case SETALL:
4646             err = target_to_host_semarray(semid, &array, target_su.array);
4647             if (err)
4648                 return err;
4649             arg.array = array;
4650             ret = get_errno(semctl(semid, semnum, cmd, arg));
4651             err = host_to_target_semarray(semid, target_su.array, &array);
4652             if (err)
4653                 return err;
4654             break;
4655 	case IPC_STAT:
4656 	case IPC_SET:
4657 	case SEM_STAT:
4658             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4659             if (err)
4660                 return err;
4661             arg.buf = &dsarg;
4662             ret = get_errno(semctl(semid, semnum, cmd, arg));
4663             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4664             if (err)
4665                 return err;
4666             break;
4667 	case IPC_INFO:
4668 	case SEM_INFO:
4669             arg.__buf = &seminfo;
4670             ret = get_errno(semctl(semid, semnum, cmd, arg));
4671             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4672             if (err)
4673                 return err;
4674             break;
4675         case IPC_RMID:
4676         case GETPID:
4677         case GETNCNT:
4678         case GETZCNT:
4679             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4680             break;
4681     }
4682 
4683     return ret;
4684 }
4685 
4686 struct target_sembuf {
4687     unsigned short sem_num;
4688     short sem_op;
4689     short sem_flg;
4690 };
4691 
4692 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4693                                              abi_ulong target_addr,
4694                                              unsigned nsops)
4695 {
4696     struct target_sembuf *target_sembuf;
4697     int i;
4698 
4699     target_sembuf = lock_user(VERIFY_READ, target_addr,
4700                               nsops*sizeof(struct target_sembuf), 1);
4701     if (!target_sembuf)
4702         return -TARGET_EFAULT;
4703 
4704     for(i=0; i<nsops; i++) {
4705         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4706         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4707         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4708     }
4709 
4710     unlock_user(target_sembuf, target_addr, 0);
4711 
4712     return 0;
4713 }
4714 
4715 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4716 {
4717     struct sembuf sops[nsops];
4718 
4719     if (target_to_host_sembuf(sops, ptr, nsops))
4720         return -TARGET_EFAULT;
4721 
4722     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4723 }
4724 
4725 struct target_msqid_ds
4726 {
4727     struct target_ipc_perm msg_perm;
4728     abi_ulong msg_stime;
4729 #if TARGET_ABI_BITS == 32
4730     abi_ulong __unused1;
4731 #endif
4732     abi_ulong msg_rtime;
4733 #if TARGET_ABI_BITS == 32
4734     abi_ulong __unused2;
4735 #endif
4736     abi_ulong msg_ctime;
4737 #if TARGET_ABI_BITS == 32
4738     abi_ulong __unused3;
4739 #endif
4740     abi_ulong __msg_cbytes;
4741     abi_ulong msg_qnum;
4742     abi_ulong msg_qbytes;
4743     abi_ulong msg_lspid;
4744     abi_ulong msg_lrpid;
4745     abi_ulong __unused4;
4746     abi_ulong __unused5;
4747 };
4748 
4749 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4750                                                abi_ulong target_addr)
4751 {
4752     struct target_msqid_ds *target_md;
4753 
4754     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4755         return -TARGET_EFAULT;
4756     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4757         return -TARGET_EFAULT;
4758     host_md->msg_stime = tswapal(target_md->msg_stime);
4759     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4760     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4761     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4762     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4763     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4764     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4765     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4766     unlock_user_struct(target_md, target_addr, 0);
4767     return 0;
4768 }
4769 
4770 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4771                                                struct msqid_ds *host_md)
4772 {
4773     struct target_msqid_ds *target_md;
4774 
4775     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4776         return -TARGET_EFAULT;
4777     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4778         return -TARGET_EFAULT;
4779     target_md->msg_stime = tswapal(host_md->msg_stime);
4780     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4781     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4782     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4783     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4784     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4785     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4786     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4787     unlock_user_struct(target_md, target_addr, 1);
4788     return 0;
4789 }
4790 
4791 struct target_msginfo {
4792     int msgpool;
4793     int msgmap;
4794     int msgmax;
4795     int msgmnb;
4796     int msgmni;
4797     int msgssz;
4798     int msgtql;
4799     unsigned short int msgseg;
4800 };
4801 
4802 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4803                                               struct msginfo *host_msginfo)
4804 {
4805     struct target_msginfo *target_msginfo;
4806     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4807         return -TARGET_EFAULT;
4808     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4809     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4810     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4811     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4812     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4813     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4814     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4815     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4816     unlock_user_struct(target_msginfo, target_addr, 1);
4817     return 0;
4818 }
4819 
4820 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4821 {
4822     struct msqid_ds dsarg;
4823     struct msginfo msginfo;
4824     abi_long ret = -TARGET_EINVAL;
4825 
4826     cmd &= 0xff;
4827 
4828     switch (cmd) {
4829     case IPC_STAT:
4830     case IPC_SET:
4831     case MSG_STAT:
4832         if (target_to_host_msqid_ds(&dsarg,ptr))
4833             return -TARGET_EFAULT;
4834         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4835         if (host_to_target_msqid_ds(ptr,&dsarg))
4836             return -TARGET_EFAULT;
4837         break;
4838     case IPC_RMID:
4839         ret = get_errno(msgctl(msgid, cmd, NULL));
4840         break;
4841     case IPC_INFO:
4842     case MSG_INFO:
4843         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4844         if (host_to_target_msginfo(ptr, &msginfo))
4845             return -TARGET_EFAULT;
4846         break;
4847     }
4848 
4849     return ret;
4850 }
4851 
4852 struct target_msgbuf {
4853     abi_long mtype;
4854     char	mtext[1];
4855 };
4856 
4857 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4858                                  ssize_t msgsz, int msgflg)
4859 {
4860     struct target_msgbuf *target_mb;
4861     struct msgbuf *host_mb;
4862     abi_long ret = 0;
4863 
4864     if (msgsz < 0) {
4865         return -TARGET_EINVAL;
4866     }
4867 
4868     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4869         return -TARGET_EFAULT;
4870     host_mb = g_try_malloc(msgsz + sizeof(long));
4871     if (!host_mb) {
4872         unlock_user_struct(target_mb, msgp, 0);
4873         return -TARGET_ENOMEM;
4874     }
4875     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4876     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4877     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4878     g_free(host_mb);
4879     unlock_user_struct(target_mb, msgp, 0);
4880 
4881     return ret;
4882 }
4883 
4884 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4885                                  ssize_t msgsz, abi_long msgtyp,
4886                                  int msgflg)
4887 {
4888     struct target_msgbuf *target_mb;
4889     char *target_mtext;
4890     struct msgbuf *host_mb;
4891     abi_long ret = 0;
4892 
4893     if (msgsz < 0) {
4894         return -TARGET_EINVAL;
4895     }
4896 
4897     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4898         return -TARGET_EFAULT;
4899 
4900     host_mb = g_try_malloc(msgsz + sizeof(long));
4901     if (!host_mb) {
4902         ret = -TARGET_ENOMEM;
4903         goto end;
4904     }
4905     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4906 
4907     if (ret > 0) {
4908         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4909         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4910         if (!target_mtext) {
4911             ret = -TARGET_EFAULT;
4912             goto end;
4913         }
4914         memcpy(target_mb->mtext, host_mb->mtext, ret);
4915         unlock_user(target_mtext, target_mtext_addr, ret);
4916     }
4917 
4918     target_mb->mtype = tswapal(host_mb->mtype);
4919 
4920 end:
4921     if (target_mb)
4922         unlock_user_struct(target_mb, msgp, 1);
4923     g_free(host_mb);
4924     return ret;
4925 }
4926 
4927 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4928                                                abi_ulong target_addr)
4929 {
4930     struct target_shmid_ds *target_sd;
4931 
4932     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4933         return -TARGET_EFAULT;
4934     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4935         return -TARGET_EFAULT;
4936     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4937     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4938     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4939     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4940     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4941     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4942     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4943     unlock_user_struct(target_sd, target_addr, 0);
4944     return 0;
4945 }
4946 
4947 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4948                                                struct shmid_ds *host_sd)
4949 {
4950     struct target_shmid_ds *target_sd;
4951 
4952     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4953         return -TARGET_EFAULT;
4954     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4955         return -TARGET_EFAULT;
4956     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4957     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4958     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4959     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4960     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4961     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4962     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4963     unlock_user_struct(target_sd, target_addr, 1);
4964     return 0;
4965 }
4966 
4967 struct  target_shminfo {
4968     abi_ulong shmmax;
4969     abi_ulong shmmin;
4970     abi_ulong shmmni;
4971     abi_ulong shmseg;
4972     abi_ulong shmall;
4973 };
4974 
4975 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4976                                               struct shminfo *host_shminfo)
4977 {
4978     struct target_shminfo *target_shminfo;
4979     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4980         return -TARGET_EFAULT;
4981     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4982     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4983     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4984     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4985     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4986     unlock_user_struct(target_shminfo, target_addr, 1);
4987     return 0;
4988 }
4989 
4990 struct target_shm_info {
4991     int used_ids;
4992     abi_ulong shm_tot;
4993     abi_ulong shm_rss;
4994     abi_ulong shm_swp;
4995     abi_ulong swap_attempts;
4996     abi_ulong swap_successes;
4997 };
4998 
4999 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
5000                                                struct shm_info *host_shm_info)
5001 {
5002     struct target_shm_info *target_shm_info;
5003     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
5004         return -TARGET_EFAULT;
5005     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
5006     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
5007     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
5008     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
5009     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
5010     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
5011     unlock_user_struct(target_shm_info, target_addr, 1);
5012     return 0;
5013 }
5014 
5015 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
5016 {
5017     struct shmid_ds dsarg;
5018     struct shminfo shminfo;
5019     struct shm_info shm_info;
5020     abi_long ret = -TARGET_EINVAL;
5021 
5022     cmd &= 0xff;
5023 
5024     switch(cmd) {
5025     case IPC_STAT:
5026     case IPC_SET:
5027     case SHM_STAT:
5028         if (target_to_host_shmid_ds(&dsarg, buf))
5029             return -TARGET_EFAULT;
5030         ret = get_errno(shmctl(shmid, cmd, &dsarg));
5031         if (host_to_target_shmid_ds(buf, &dsarg))
5032             return -TARGET_EFAULT;
5033         break;
5034     case IPC_INFO:
5035         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
5036         if (host_to_target_shminfo(buf, &shminfo))
5037             return -TARGET_EFAULT;
5038         break;
5039     case SHM_INFO:
5040         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
5041         if (host_to_target_shm_info(buf, &shm_info))
5042             return -TARGET_EFAULT;
5043         break;
5044     case IPC_RMID:
5045     case SHM_LOCK:
5046     case SHM_UNLOCK:
5047         ret = get_errno(shmctl(shmid, cmd, NULL));
5048         break;
5049     }
5050 
5051     return ret;
5052 }
5053 
5054 #ifndef TARGET_FORCE_SHMLBA
5055 /* For most architectures, SHMLBA is the same as the page size;
5056  * some architectures have larger values, in which case they should
5057  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
5058  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
5059  * and defining its own value for SHMLBA.
5060  *
5061  * The kernel also permits SHMLBA to be set by the architecture to a
5062  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
5063  * this means that addresses are rounded to the large size if
5064  * SHM_RND is set but addresses not aligned to that size are not rejected
5065  * as long as they are at least page-aligned. Since the only architecture
5066  * which uses this is ia64, this code doesn't provide for that oddity.
5067  */
5068 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
5069 {
5070     return TARGET_PAGE_SIZE;
5071 }
5072 #endif
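/* Illustrative sketch only: an architecture needing a larger alignment would
 * define TARGET_FORCE_SHMLBA and provide its own helper, along the lines of
 * the hypothetical
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 */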
5073 
5074 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
5075                                  int shmid, abi_ulong shmaddr, int shmflg)
5076 {
5077     abi_long raddr;
5078     void *host_raddr;
5079     struct shmid_ds shm_info;
5080     int i,ret;
5081     abi_ulong shmlba;
5082 
5083     /* find out the length of the shared memory segment */
5084     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
5085     if (is_error(ret)) {
5086         /* can't get length, bail out */
5087         return ret;
5088     }
5089 
5090     shmlba = target_shmlba(cpu_env);
5091 
5092     if (shmaddr & (shmlba - 1)) {
5093         if (shmflg & SHM_RND) {
5094             shmaddr &= ~(shmlba - 1);
5095         } else {
5096             return -TARGET_EINVAL;
5097         }
5098     }
5099     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
5100         return -TARGET_EINVAL;
5101     }
5102 
5103     mmap_lock();
5104 
5105     if (shmaddr)
5106         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
5107     else {
5108         abi_ulong mmap_start;
5109 
5110         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
5111 
5112         if (mmap_start == -1) {
5113             errno = ENOMEM;
5114             host_raddr = (void *)-1;
5115         } else
5116             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
5117     }
5118 
5119     if (host_raddr == (void *)-1) {
5120         mmap_unlock();
5121         return get_errno((long)host_raddr);
5122     }
5123     raddr=h2g((unsigned long)host_raddr);
5124 
5125     page_set_flags(raddr, raddr + shm_info.shm_segsz,
5126                    PAGE_VALID | PAGE_READ |
5127                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
5128 
5129     for (i = 0; i < N_SHM_REGIONS; i++) {
5130         if (!shm_regions[i].in_use) {
5131             shm_regions[i].in_use = true;
5132             shm_regions[i].start = raddr;
5133             shm_regions[i].size = shm_info.shm_segsz;
5134             break;
5135         }
5136     }
5137 
5138     mmap_unlock();
5139     return raddr;
5140 
5141 }
5142 
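/* Detach a shared memory segment previously mapped by do_shmat(): mark the
 * tracked shm region slot free, clear the guest page flags for the range,
 * and forward the detach to the host shmdt().
 */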
5143 static inline abi_long do_shmdt(abi_ulong shmaddr)
5144 {
5145     int i;
5146     abi_long rv;
5147 
5148     mmap_lock();
5149 
5150     for (i = 0; i < N_SHM_REGIONS; ++i) {
5151         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
5152             shm_regions[i].in_use = false;
5153             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
5154             break;
5155         }
5156     }
5157     rv = get_errno(shmdt(g2h(shmaddr)));
5158 
5159     mmap_unlock();
5160 
5161     return rv;
5162 }
5163 
5164 #ifdef TARGET_NR_ipc
5165 /* ??? This only works with linear mappings.  */
5166 /* do_ipc() must return target values and target errnos. */
5167 static abi_long do_ipc(CPUArchState *cpu_env,
5168                        unsigned int call, abi_long first,
5169                        abi_long second, abi_long third,
5170                        abi_long ptr, abi_long fifth)
5171 {
5172     int version;
5173     abi_long ret = 0;
5174 
5175     version = call >> 16;
5176     call &= 0xffff;
5177 
5178     switch (call) {
5179     case IPCOP_semop:
5180         ret = do_semop(first, ptr, second);
5181         break;
5182 
5183     case IPCOP_semget:
5184         ret = get_errno(semget(first, second, third));
5185         break;
5186 
5187     case IPCOP_semctl: {
5188         /* The semun argument to semctl is passed by value, so dereference the
5189          * ptr argument. */
5190         abi_ulong atptr;
5191         get_user_ual(atptr, ptr);
5192         ret = do_semctl(first, second, third, atptr);
5193         break;
5194     }
5195 
5196     case IPCOP_msgget:
5197         ret = get_errno(msgget(first, second));
5198         break;
5199 
5200     case IPCOP_msgsnd:
5201         ret = do_msgsnd(first, ptr, second, third);
5202         break;
5203 
5204     case IPCOP_msgctl:
5205         ret = do_msgctl(first, second, ptr);
5206         break;
5207 
5208     case IPCOP_msgrcv:
5209         switch (version) {
5210         case 0:
5211             {
5212                 struct target_ipc_kludge {
5213                     abi_long msgp;
5214                     abi_long msgtyp;
5215                 } *tmp;
5216 
5217                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5218                     ret = -TARGET_EFAULT;
5219                     break;
5220                 }
5221 
5222                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5223 
5224                 unlock_user_struct(tmp, ptr, 0);
5225                 break;
5226             }
5227         default:
5228             ret = do_msgrcv(first, ptr, second, fifth, third);
5229         }
5230         break;
5231 
5232     case IPCOP_shmat:
5233         switch (version) {
5234         default:
5235         {
5236             abi_ulong raddr;
5237             raddr = do_shmat(cpu_env, first, ptr, second);
5238             if (is_error(raddr))
5239                 return get_errno(raddr);
5240             if (put_user_ual(raddr, third))
5241                 return -TARGET_EFAULT;
5242             break;
5243         }
5244         case 1:
5245             ret = -TARGET_EINVAL;
5246             break;
5247         }
5248         break;
5249     case IPCOP_shmdt:
5250         ret = do_shmdt(ptr);
5251         break;
5252 
5253     case IPCOP_shmget:
5254         /* IPC_* flag values are the same on all Linux platforms */
5255         ret = get_errno(shmget(first, second, third));
5256         break;
5257 
5258     /* IPC_* and SHM_* command values are the same on all Linux platforms */
5259     case IPCOP_shmctl:
5260         ret = do_shmctl(first, second, ptr);
5261         break;
5262     default:
5263         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5264         ret = -TARGET_ENOSYS;
5265         break;
5266     }
5267     return ret;
5268 }
5269 #endif
5270 
5271 /* kernel structure types definitions */
5272 
5273 #define STRUCT(name, ...) STRUCT_ ## name,
5274 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5275 enum {
5276 #include "syscall_types.h"
5277 STRUCT_MAX
5278 };
5279 #undef STRUCT
5280 #undef STRUCT_SPECIAL
5281 
5282 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
5283 #define STRUCT_SPECIAL(name)
5284 #include "syscall_types.h"
5285 #undef STRUCT
5286 #undef STRUCT_SPECIAL
5287 
5288 typedef struct IOCTLEntry IOCTLEntry;
5289 
5290 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5291                              int fd, int cmd, abi_long arg);
5292 
5293 struct IOCTLEntry {
5294     int target_cmd;
5295     unsigned int host_cmd;
5296     const char *name;
5297     int access;
5298     do_ioctl_fn *do_ioctl;
5299     const argtype arg_type[5];
5300 };
5301 
5302 #define IOC_R 0x0001
5303 #define IOC_W 0x0002
5304 #define IOC_RW (IOC_R | IOC_W)
5305 
5306 #define MAX_STRUCT_SIZE 4096
5307 
5308 #ifdef CONFIG_FIEMAP
5309 /* Chosen so that fiemap access checks don't overflow on 32-bit systems.
5310  * This is very slightly smaller than the limit imposed by
5311  * the underlying kernel.
5312  */
5313 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
5314                             / sizeof(struct fiemap_extent))
5315 
5316 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5317                                        int fd, int cmd, abi_long arg)
5318 {
5319     /* The parameter for this ioctl is a struct fiemap followed
5320      * by an array of struct fiemap_extent whose size is set
5321      * in fiemap->fm_extent_count. The array is filled in by the
5322      * ioctl.
5323      */
5324     int target_size_in, target_size_out;
5325     struct fiemap *fm;
5326     const argtype *arg_type = ie->arg_type;
5327     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5328     void *argptr, *p;
5329     abi_long ret;
5330     int i, extent_size = thunk_type_size(extent_arg_type, 0);
5331     uint32_t outbufsz;
5332     int free_fm = 0;
5333 
5334     assert(arg_type[0] == TYPE_PTR);
5335     assert(ie->access == IOC_RW);
5336     arg_type++;
5337     target_size_in = thunk_type_size(arg_type, 0);
5338     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5339     if (!argptr) {
5340         return -TARGET_EFAULT;
5341     }
5342     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5343     unlock_user(argptr, arg, 0);
5344     fm = (struct fiemap *)buf_temp;
5345     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5346         return -TARGET_EINVAL;
5347     }
5348 
5349     outbufsz = sizeof (*fm) +
5350         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5351 
5352     if (outbufsz > MAX_STRUCT_SIZE) {
5353         /* We can't fit all the extents into the fixed size buffer.
5354          * Allocate one that is large enough and use it instead.
5355          */
5356         fm = g_try_malloc(outbufsz);
5357         if (!fm) {
5358             return -TARGET_ENOMEM;
5359         }
5360         memcpy(fm, buf_temp, sizeof(struct fiemap));
5361         free_fm = 1;
5362     }
5363     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5364     if (!is_error(ret)) {
5365         target_size_out = target_size_in;
5366         /* An extent_count of 0 means we were only counting the extents
5367          * so there are no structs to copy
5368          */
5369         if (fm->fm_extent_count != 0) {
5370             target_size_out += fm->fm_mapped_extents * extent_size;
5371         }
5372         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5373         if (!argptr) {
5374             ret = -TARGET_EFAULT;
5375         } else {
5376             /* Convert the struct fiemap */
5377             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5378             if (fm->fm_extent_count != 0) {
5379                 p = argptr + target_size_in;
5380                 /* ...and then all the struct fiemap_extents */
5381                 for (i = 0; i < fm->fm_mapped_extents; i++) {
5382                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5383                                   THUNK_TARGET);
5384                     p += extent_size;
5385                 }
5386             }
5387             unlock_user(argptr, arg, target_size_out);
5388         }
5389     }
5390     if (free_fm) {
5391         g_free(fm);
5392     }
5393     return ret;
5394 }
5395 #endif
5396 
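/* Handle the ifconf ioctl (SIOCGIFCONF): the guest's struct ifconf carries a
 * buffer of target struct ifreq entries, whose size may differ from the
 * host's, so the ifc_len values and every ifreq must be converted in both
 * directions.
 */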
5397 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5398                                 int fd, int cmd, abi_long arg)
5399 {
5400     const argtype *arg_type = ie->arg_type;
5401     int target_size;
5402     void *argptr;
5403     int ret;
5404     struct ifconf *host_ifconf;
5405     uint32_t outbufsz;
5406     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5407     int target_ifreq_size;
5408     int nb_ifreq;
5409     int free_buf = 0;
5410     int i;
5411     int target_ifc_len;
5412     abi_long target_ifc_buf;
5413     int host_ifc_len;
5414     char *host_ifc_buf;
5415 
5416     assert(arg_type[0] == TYPE_PTR);
5417     assert(ie->access == IOC_RW);
5418 
5419     arg_type++;
5420     target_size = thunk_type_size(arg_type, 0);
5421 
5422     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5423     if (!argptr)
5424         return -TARGET_EFAULT;
5425     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5426     unlock_user(argptr, arg, 0);
5427 
5428     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5429     target_ifc_len = host_ifconf->ifc_len;
5430     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5431 
5432     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5433     nb_ifreq = target_ifc_len / target_ifreq_size;
5434     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5435 
5436     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5437     if (outbufsz > MAX_STRUCT_SIZE) {
5438         /* We can't fit all the ifreq entries into the fixed size buffer.
5439          * Allocate one that is large enough and use it instead.
5440          */
5441         host_ifconf = malloc(outbufsz);
5442         if (!host_ifconf) {
5443             return -TARGET_ENOMEM;
5444         }
5445         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5446         free_buf = 1;
5447     }
5448     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5449 
5450     host_ifconf->ifc_len = host_ifc_len;
5451     host_ifconf->ifc_buf = host_ifc_buf;
5452 
5453     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5454     if (!is_error(ret)) {
5455         /* convert host ifc_len to target ifc_len */
5456 
5457         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5458         target_ifc_len = nb_ifreq * target_ifreq_size;
5459         host_ifconf->ifc_len = target_ifc_len;
5460 
5461         /* restore target ifc_buf */
5462 
5463         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5464 
5465         /* copy struct ifconf to target user */
5466 
5467         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5468         if (!argptr)
5469             return -TARGET_EFAULT;
5470         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5471         unlock_user(argptr, arg, target_size);
5472 
5473         /* copy ifreq[] to target user */
5474 
5475         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5476         for (i = 0; i < nb_ifreq ; i++) {
5477             thunk_convert(argptr + i * target_ifreq_size,
5478                           host_ifc_buf + i * sizeof(struct ifreq),
5479                           ifreq_arg_type, THUNK_TARGET);
5480         }
5481         unlock_user(argptr, target_ifc_buf, target_ifc_len);
5482     }
5483 
5484     if (free_buf) {
5485         free(host_ifconf);
5486     }
5487 
5488     return ret;
5489 }
5490 
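/* Handle device-mapper DM_* ioctls: a struct dm_ioctl header is followed by
 * a variable-sized payload starting at data_start, and both the header and
 * the command-specific payload must be converted between target and host
 * layouts.
 */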
5491 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5492                             int cmd, abi_long arg)
5493 {
5494     void *argptr;
5495     struct dm_ioctl *host_dm;
5496     abi_long guest_data;
5497     uint32_t guest_data_size;
5498     int target_size;
5499     const argtype *arg_type = ie->arg_type;
5500     abi_long ret;
5501     void *big_buf = NULL;
5502     char *host_data;
5503 
5504     arg_type++;
5505     target_size = thunk_type_size(arg_type, 0);
5506     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5507     if (!argptr) {
5508         ret = -TARGET_EFAULT;
5509         goto out;
5510     }
5511     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5512     unlock_user(argptr, arg, 0);
5513 
5514     /* buf_temp is too small, so fetch things into a bigger buffer */
5515     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5516     memcpy(big_buf, buf_temp, target_size);
5517     buf_temp = big_buf;
5518     host_dm = big_buf;
5519 
5520     guest_data = arg + host_dm->data_start;
5521     if ((guest_data - arg) < 0) {
5522         ret = -TARGET_EINVAL;
5523         goto out;
5524     }
5525     guest_data_size = host_dm->data_size - host_dm->data_start;
5526     host_data = (char*)host_dm + host_dm->data_start;
5527 
5528     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5529     if (!argptr) {
5530         ret = -TARGET_EFAULT;
5531         goto out;
5532     }
5533 
5534     switch (ie->host_cmd) {
5535     case DM_REMOVE_ALL:
5536     case DM_LIST_DEVICES:
5537     case DM_DEV_CREATE:
5538     case DM_DEV_REMOVE:
5539     case DM_DEV_SUSPEND:
5540     case DM_DEV_STATUS:
5541     case DM_DEV_WAIT:
5542     case DM_TABLE_STATUS:
5543     case DM_TABLE_CLEAR:
5544     case DM_TABLE_DEPS:
5545     case DM_LIST_VERSIONS:
5546         /* no input data */
5547         break;
5548     case DM_DEV_RENAME:
5549     case DM_DEV_SET_GEOMETRY:
5550         /* data contains only strings */
5551         memcpy(host_data, argptr, guest_data_size);
5552         break;
5553     case DM_TARGET_MSG:
5554         memcpy(host_data, argptr, guest_data_size);
5555         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5556         break;
5557     case DM_TABLE_LOAD:
5558     {
5559         void *gspec = argptr;
5560         void *cur_data = host_data;
5561         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5562         int spec_size = thunk_type_size(arg_type, 0);
5563         int i;
5564 
5565         for (i = 0; i < host_dm->target_count; i++) {
5566             struct dm_target_spec *spec = cur_data;
5567             uint32_t next;
5568             int slen;
5569 
5570             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5571             slen = strlen((char*)gspec + spec_size) + 1;
5572             next = spec->next;
5573             spec->next = sizeof(*spec) + slen;
5574             strcpy((char*)&spec[1], gspec + spec_size);
5575             gspec += next;
5576             cur_data += spec->next;
5577         }
5578         break;
5579     }
5580     default:
5581         ret = -TARGET_EINVAL;
5582         unlock_user(argptr, guest_data, 0);
5583         goto out;
5584     }
5585     unlock_user(argptr, guest_data, 0);
5586 
5587     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5588     if (!is_error(ret)) {
5589         guest_data = arg + host_dm->data_start;
5590         guest_data_size = host_dm->data_size - host_dm->data_start;
5591         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5592         switch (ie->host_cmd) {
5593         case DM_REMOVE_ALL:
5594         case DM_DEV_CREATE:
5595         case DM_DEV_REMOVE:
5596         case DM_DEV_RENAME:
5597         case DM_DEV_SUSPEND:
5598         case DM_DEV_STATUS:
5599         case DM_TABLE_LOAD:
5600         case DM_TABLE_CLEAR:
5601         case DM_TARGET_MSG:
5602         case DM_DEV_SET_GEOMETRY:
5603             /* no return data */
5604             break;
5605         case DM_LIST_DEVICES:
5606         {
5607             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5608             uint32_t remaining_data = guest_data_size;
5609             void *cur_data = argptr;
5610             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5611             int nl_size = 12; /* can't use thunk_size due to alignment */
5612 
5613             while (1) {
5614                 uint32_t next = nl->next;
5615                 if (next) {
5616                     nl->next = nl_size + (strlen(nl->name) + 1);
5617                 }
5618                 if (remaining_data < nl->next) {
5619                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5620                     break;
5621                 }
5622                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5623                 strcpy(cur_data + nl_size, nl->name);
5624                 cur_data += nl->next;
5625                 remaining_data -= nl->next;
5626                 if (!next) {
5627                     break;
5628                 }
5629                 nl = (void*)nl + next;
5630             }
5631             break;
5632         }
5633         case DM_DEV_WAIT:
5634         case DM_TABLE_STATUS:
5635         {
5636             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5637             void *cur_data = argptr;
5638             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5639             int spec_size = thunk_type_size(arg_type, 0);
5640             int i;
5641 
5642             for (i = 0; i < host_dm->target_count; i++) {
5643                 uint32_t next = spec->next;
5644                 int slen = strlen((char*)&spec[1]) + 1;
5645                 spec->next = (cur_data - argptr) + spec_size + slen;
5646                 if (guest_data_size < spec->next) {
5647                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5648                     break;
5649                 }
5650                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5651                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5652                 cur_data = argptr + spec->next;
5653                 spec = (void*)host_dm + host_dm->data_start + next;
5654             }
5655             break;
5656         }
5657         case DM_TABLE_DEPS:
5658         {
5659             void *hdata = (void*)host_dm + host_dm->data_start;
5660             int count = *(uint32_t*)hdata;
5661             uint64_t *hdev = hdata + 8;
5662             uint64_t *gdev = argptr + 8;
5663             int i;
5664 
5665             *(uint32_t*)argptr = tswap32(count);
5666             for (i = 0; i < count; i++) {
5667                 *gdev = tswap64(*hdev);
5668                 gdev++;
5669                 hdev++;
5670             }
5671             break;
5672         }
5673         case DM_LIST_VERSIONS:
5674         {
5675             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5676             uint32_t remaining_data = guest_data_size;
5677             void *cur_data = argptr;
5678             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5679             int vers_size = thunk_type_size(arg_type, 0);
5680 
5681             while (1) {
5682                 uint32_t next = vers->next;
5683                 if (next) {
5684                     vers->next = vers_size + (strlen(vers->name) + 1);
5685                 }
5686                 if (remaining_data < vers->next) {
5687                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5688                     break;
5689                 }
5690                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5691                 strcpy(cur_data + vers_size, vers->name);
5692                 cur_data += vers->next;
5693                 remaining_data -= vers->next;
5694                 if (!next) {
5695                     break;
5696                 }
5697                 vers = (void*)vers + next;
5698             }
5699             break;
5700         }
5701         default:
5702             unlock_user(argptr, guest_data, 0);
5703             ret = -TARGET_EINVAL;
5704             goto out;
5705         }
5706         unlock_user(argptr, guest_data, guest_data_size);
5707 
5708         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5709         if (!argptr) {
5710             ret = -TARGET_EFAULT;
5711             goto out;
5712         }
5713         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5714         unlock_user(argptr, arg, target_size);
5715     }
5716 out:
5717     g_free(big_buf);
5718     return ret;
5719 }
5720 
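/* Handle BLKPG: convert the guest's struct blkpg_ioctl_arg, then fetch and
 * convert the struct blkpg_partition it points to, and repoint the host
 * structure at that local copy before issuing the ioctl.
 */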
5721 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5722                                int cmd, abi_long arg)
5723 {
5724     void *argptr;
5725     int target_size;
5726     const argtype *arg_type = ie->arg_type;
5727     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5728     abi_long ret;
5729 
5730     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5731     struct blkpg_partition host_part;
5732 
5733     /* Read and convert blkpg */
5734     arg_type++;
5735     target_size = thunk_type_size(arg_type, 0);
5736     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5737     if (!argptr) {
5738         ret = -TARGET_EFAULT;
5739         goto out;
5740     }
5741     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5742     unlock_user(argptr, arg, 0);
5743 
5744     switch (host_blkpg->op) {
5745     case BLKPG_ADD_PARTITION:
5746     case BLKPG_DEL_PARTITION:
5747         /* payload is struct blkpg_partition */
5748         break;
5749     default:
5750         /* Unknown opcode */
5751         ret = -TARGET_EINVAL;
5752         goto out;
5753     }
5754 
5755     /* Read and convert blkpg->data */
5756     arg = (abi_long)(uintptr_t)host_blkpg->data;
5757     target_size = thunk_type_size(part_arg_type, 0);
5758     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5759     if (!argptr) {
5760         ret = -TARGET_EFAULT;
5761         goto out;
5762     }
5763     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5764     unlock_user(argptr, arg, 0);
5765 
5766     /* Swizzle the data pointer to our local copy and call! */
5767     host_blkpg->data = &host_part;
5768     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5769 
5770 out:
5771     return ret;
5772 }
5773 
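/* Handle routing table ioctls that take a struct rtentry: the rt_dev field
 * is a pointer to a device name string, so it is locked and translated by
 * hand while the remaining fields go through the normal thunk conversion.
 */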
5774 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5775                                 int fd, int cmd, abi_long arg)
5776 {
5777     const argtype *arg_type = ie->arg_type;
5778     const StructEntry *se;
5779     const argtype *field_types;
5780     const int *dst_offsets, *src_offsets;
5781     int target_size;
5782     void *argptr;
5783     abi_ulong *target_rt_dev_ptr;
5784     unsigned long *host_rt_dev_ptr;
5785     abi_long ret;
5786     int i;
5787 
5788     assert(ie->access == IOC_W);
5789     assert(*arg_type == TYPE_PTR);
5790     arg_type++;
5791     assert(*arg_type == TYPE_STRUCT);
5792     target_size = thunk_type_size(arg_type, 0);
5793     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5794     if (!argptr) {
5795         return -TARGET_EFAULT;
5796     }
5797     arg_type++;
5798     assert(*arg_type == (int)STRUCT_rtentry);
5799     se = struct_entries + *arg_type++;
5800     assert(se->convert[0] == NULL);
5801     /* Convert the struct here so that we can catch the rt_dev string. */
5802     field_types = se->field_types;
5803     dst_offsets = se->field_offsets[THUNK_HOST];
5804     src_offsets = se->field_offsets[THUNK_TARGET];
5805     for (i = 0; i < se->nb_fields; i++) {
5806         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5807             assert(*field_types == TYPE_PTRVOID);
5808             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5809             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5810             if (*target_rt_dev_ptr != 0) {
5811                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5812                                                   tswapal(*target_rt_dev_ptr));
5813                 if (!*host_rt_dev_ptr) {
5814                     unlock_user(argptr, arg, 0);
5815                     return -TARGET_EFAULT;
5816                 }
5817             } else {
5818                 *host_rt_dev_ptr = 0;
5819             }
5820             field_types++;
5821             continue;
5822         }
5823         field_types = thunk_convert(buf_temp + dst_offsets[i],
5824                                     argptr + src_offsets[i],
5825                                     field_types, THUNK_HOST);
5826     }
5827     unlock_user(argptr, arg, 0);
5828 
5829     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5830     if (*host_rt_dev_ptr != 0) {
5831         unlock_user((void *)*host_rt_dev_ptr,
5832                     *target_rt_dev_ptr, 0);
5833     }
5834     return ret;
5835 }
5836 
5837 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5838                                      int fd, int cmd, abi_long arg)
5839 {
5840     int sig = target_to_host_signal(arg);
5841     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5842 }
5843 
5844 #ifdef TIOCGPTPEER
5845 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5846                                      int fd, int cmd, abi_long arg)
5847 {
5848     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5849     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5850 }
5851 #endif
5852 
5853 static IOCTLEntry ioctl_entries[] = {
5854 #define IOCTL(cmd, access, ...) \
5855     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5856 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5857     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5858 #define IOCTL_IGNORE(cmd) \
5859     { TARGET_ ## cmd, 0, #cmd },
5860 #include "ioctls.h"
5861     { 0, 0, },
5862 };
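/* For reference, each IOCTL() line pulled in from "ioctls.h" expands to one
 * table entry; a hypothetical
 *
 *     IOCTL(FIOCLEX, 0, TYPE_NULL)
 *
 * would become { TARGET_FIOCLEX, FIOCLEX, "FIOCLEX", 0, 0, { TYPE_NULL } }.
 */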
5863 
5864 /* ??? Implement proper locking for ioctls.  */
5865 /* do_ioctl() must return target values and target errnos. */
5866 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5867 {
5868     const IOCTLEntry *ie;
5869     const argtype *arg_type;
5870     abi_long ret;
5871     uint8_t buf_temp[MAX_STRUCT_SIZE];
5872     int target_size;
5873     void *argptr;
5874 
5875     ie = ioctl_entries;
5876     for(;;) {
5877         if (ie->target_cmd == 0) {
5878             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5879             return -TARGET_ENOSYS;
5880         }
5881         if (ie->target_cmd == cmd)
5882             break;
5883         ie++;
5884     }
5885     arg_type = ie->arg_type;
5886     if (ie->do_ioctl) {
5887         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5888     } else if (!ie->host_cmd) {
5889         /* Some architectures define BSD ioctls in their headers
5890            that are not implemented in Linux.  */
5891         return -TARGET_ENOSYS;
5892     }
5893 
5894     switch(arg_type[0]) {
5895     case TYPE_NULL:
5896         /* no argument */
5897         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5898         break;
5899     case TYPE_PTRVOID:
5900     case TYPE_INT:
5901         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5902         break;
5903     case TYPE_PTR:
5904         arg_type++;
5905         target_size = thunk_type_size(arg_type, 0);
5906         switch(ie->access) {
5907         case IOC_R:
5908             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5909             if (!is_error(ret)) {
5910                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5911                 if (!argptr)
5912                     return -TARGET_EFAULT;
5913                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5914                 unlock_user(argptr, arg, target_size);
5915             }
5916             break;
5917         case IOC_W:
5918             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5919             if (!argptr)
5920                 return -TARGET_EFAULT;
5921             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5922             unlock_user(argptr, arg, 0);
5923             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5924             break;
5925         default:
5926         case IOC_RW:
5927             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5928             if (!argptr)
5929                 return -TARGET_EFAULT;
5930             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5931             unlock_user(argptr, arg, 0);
5932             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5933             if (!is_error(ret)) {
5934                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5935                 if (!argptr)
5936                     return -TARGET_EFAULT;
5937                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5938                 unlock_user(argptr, arg, target_size);
5939             }
5940             break;
5941         }
5942         break;
5943     default:
5944         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5945                  (long)cmd, arg_type[0]);
5946         ret = -TARGET_ENOSYS;
5947         break;
5948     }
5949     return ret;
5950 }
5951 
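/* Translation tables between target and host termios flag bits, consumed by
 * target_to_host_bitmask()/host_to_target_bitmask() in the termios
 * converters below.
 */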
5952 static const bitmask_transtbl iflag_tbl[] = {
5953         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5954         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5955         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5956         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5957         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5958         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5959         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5960         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5961         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5962         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5963         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5964         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5965         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5966         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5967         { 0, 0, 0, 0 }
5968 };
5969 
5970 static const bitmask_transtbl oflag_tbl[] = {
5971 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5972 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5973 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5974 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5975 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5976 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5977 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5978 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5979 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5980 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5981 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5982 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5983 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5984 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5985 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5986 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5987 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5988 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5989 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5990 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5991 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5992 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5993 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5994 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5995 	{ 0, 0, 0, 0 }
5996 };
5997 
5998 static const bitmask_transtbl cflag_tbl[] = {
5999 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
6000 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
6001 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
6002 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
6003 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
6004 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
6005 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
6006 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
6007 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
6008 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
6009 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
6010 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
6011 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
6012 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
6013 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
6014 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
6015 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
6016 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
6017 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
6018 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
6019 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
6020 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
6021 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
6022 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
6023 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
6024 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
6025 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
6026 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
6027 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
6028 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
6029 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
6030 	{ 0, 0, 0, 0 }
6031 };
6032 
6033 static const bitmask_transtbl lflag_tbl[] = {
6034 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
6035 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
6036 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
6037 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
6038 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
6039 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
6040 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
6041 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
6042 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
6043 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
6044 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
6045 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
6046 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
6047 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
6048 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
6049 	{ 0, 0, 0, 0 }
6050 };
6051 
6052 static void target_to_host_termios (void *dst, const void *src)
6053 {
6054     struct host_termios *host = dst;
6055     const struct target_termios *target = src;
6056 
6057     host->c_iflag =
6058         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
6059     host->c_oflag =
6060         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
6061     host->c_cflag =
6062         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
6063     host->c_lflag =
6064         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
6065     host->c_line = target->c_line;
6066 
6067     memset(host->c_cc, 0, sizeof(host->c_cc));
6068     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
6069     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
6070     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
6071     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
6072     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
6073     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
6074     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
6075     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
6076     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
6077     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
6078     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
6079     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
6080     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
6081     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
6082     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
6083     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
6084     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6085 }
6086 
6087 static void host_to_target_termios (void *dst, const void *src)
6088 {
6089     struct target_termios *target = dst;
6090     const struct host_termios *host = src;
6091 
6092     target->c_iflag =
6093         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6094     target->c_oflag =
6095         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6096     target->c_cflag =
6097         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6098     target->c_lflag =
6099         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6100     target->c_line = host->c_line;
6101 
6102     memset(target->c_cc, 0, sizeof(target->c_cc));
6103     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6104     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6105     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6106     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6107     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6108     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6109     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6110     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6111     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6112     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6113     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6114     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6115     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6116     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6117     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6118     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6119     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6120 }
6121 
6122 static const StructEntry struct_termios_def = {
6123     .convert = { host_to_target_termios, target_to_host_termios },
6124     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6125     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6126 };
6127 
6128 static bitmask_transtbl mmap_flags_tbl[] = {
6129     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6130     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6131     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6132     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6133       MAP_ANONYMOUS, MAP_ANONYMOUS },
6134     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6135       MAP_GROWSDOWN, MAP_GROWSDOWN },
6136     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6137       MAP_DENYWRITE, MAP_DENYWRITE },
6138     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6139       MAP_EXECUTABLE, MAP_EXECUTABLE },
6140     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6141     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6142       MAP_NORESERVE, MAP_NORESERVE },
6143     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6144     /* MAP_STACK had been ignored by the kernel for quite some time.
6145        Recognize it for the target insofar as we do not want to pass
6146        it through to the host.  */
6147     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6148     { 0, 0, 0, 0 }
6149 };
6150 
6151 #if defined(TARGET_I386)
6152 
6153 /* NOTE: there is really only one LDT, shared by all threads */
6154 static uint8_t *ldt_table;
6155 
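/* Copy the current contents of the emulated LDT out to guest memory (the
 * read side of the modify_ldt emulation).
 */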
6156 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6157 {
6158     int size;
6159     void *p;
6160 
6161     if (!ldt_table)
6162         return 0;
6163     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6164     if (size > bytecount)
6165         size = bytecount;
6166     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6167     if (!p)
6168         return -TARGET_EFAULT;
6169     /* ??? Should this be byteswapped?  */
6170     memcpy(p, ldt_table, size);
6171     unlock_user(p, ptr, size);
6172     return size;
6173 }
6174 
6175 /* XXX: add locking support */
6176 static abi_long write_ldt(CPUX86State *env,
6177                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6178 {
6179     struct target_modify_ldt_ldt_s ldt_info;
6180     struct target_modify_ldt_ldt_s *target_ldt_info;
6181     int seg_32bit, contents, read_exec_only, limit_in_pages;
6182     int seg_not_present, useable, lm;
6183     uint32_t *lp, entry_1, entry_2;
6184 
6185     if (bytecount != sizeof(ldt_info))
6186         return -TARGET_EINVAL;
6187     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6188         return -TARGET_EFAULT;
6189     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6190     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6191     ldt_info.limit = tswap32(target_ldt_info->limit);
6192     ldt_info.flags = tswap32(target_ldt_info->flags);
6193     unlock_user_struct(target_ldt_info, ptr, 0);
6194 
6195     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6196         return -TARGET_EINVAL;
6197     seg_32bit = ldt_info.flags & 1;
6198     contents = (ldt_info.flags >> 1) & 3;
6199     read_exec_only = (ldt_info.flags >> 3) & 1;
6200     limit_in_pages = (ldt_info.flags >> 4) & 1;
6201     seg_not_present = (ldt_info.flags >> 5) & 1;
6202     useable = (ldt_info.flags >> 6) & 1;
6203 #ifdef TARGET_ABI32
6204     lm = 0;
6205 #else
6206     lm = (ldt_info.flags >> 7) & 1;
6207 #endif
6208     if (contents == 3) {
6209         if (oldmode)
6210             return -TARGET_EINVAL;
6211         if (seg_not_present == 0)
6212             return -TARGET_EINVAL;
6213     }
6214     /* allocate the LDT */
6215     if (!ldt_table) {
6216         env->ldt.base = target_mmap(0,
6217                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6218                                     PROT_READ|PROT_WRITE,
6219                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6220         if (env->ldt.base == -1)
6221             return -TARGET_ENOMEM;
6222         memset(g2h(env->ldt.base), 0,
6223                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6224         env->ldt.limit = 0xffff;
6225         ldt_table = g2h(env->ldt.base);
6226     }
6227 
6228     /* NOTE: same code as Linux kernel */
6229     /* Allow LDTs to be cleared by the user. */
6230     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6231         if (oldmode ||
6232             (contents == 0             &&
6233              read_exec_only == 1       &&
6234              seg_32bit == 0            &&
6235              limit_in_pages == 0       &&
6236              seg_not_present == 1      &&
6237              useable == 0 )) {
6238             entry_1 = 0;
6239             entry_2 = 0;
6240             goto install;
6241         }
6242     }
6243 
6244     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6245         (ldt_info.limit & 0x0ffff);
6246     entry_2 = (ldt_info.base_addr & 0xff000000) |
6247         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6248         (ldt_info.limit & 0xf0000) |
6249         ((read_exec_only ^ 1) << 9) |
6250         (contents << 10) |
6251         ((seg_not_present ^ 1) << 15) |
6252         (seg_32bit << 22) |
6253         (limit_in_pages << 23) |
6254         (lm << 21) |
6255         0x7000;
6256     if (!oldmode)
6257         entry_2 |= (useable << 20);
6258 
6259     /* Install the new entry ...  */
6260 install:
6261     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6262     lp[0] = tswap32(entry_1);
6263     lp[1] = tswap32(entry_2);
6264     return 0;
6265 }
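/* For reference, entry_1/entry_2 above are the low and high 32-bit words
 * of a standard x86 segment descriptor:
 *   entry_1: limit[15:0] in bits 0-15, base[15:0] in bits 16-31.
 *   entry_2: base[23:16] in bits 0-7, accessed bit in bit 8,
 *            RW (read_exec_only ^ 1) in bit 9, "contents" in bits 10-11,
 *            S=1 and DPL=3 (the 0x7000 constant) in bits 12-14,
 *            P (seg_not_present ^ 1) in bit 15, limit[19:16] in bits 16-19,
 *            AVL ("useable") in bit 20, L (lm) in bit 21,
 *            D/B (seg_32bit) in bit 22, G (limit_in_pages) in bit 23,
 *            base[31:24] in bits 24-31.
 * do_set_thread_area() below packs GDT entries with the same layout.  */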
6266 
6267 /* specific and weird i386 syscalls */
6268 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6269                               unsigned long bytecount)
6270 {
6271     abi_long ret;
6272 
6273     switch (func) {
6274     case 0:
6275         ret = read_ldt(ptr, bytecount);
6276         break;
6277     case 1:
6278         ret = write_ldt(env, ptr, bytecount, 1);
6279         break;
6280     case 0x11:
6281         ret = write_ldt(env, ptr, bytecount, 0);
6282         break;
6283     default:
6284         ret = -TARGET_ENOSYS;
6285         break;
6286     }
6287     return ret;
6288 }
6289 
6290 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6291 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6292 {
6293     uint64_t *gdt_table = g2h(env->gdt.base);
6294     struct target_modify_ldt_ldt_s ldt_info;
6295     struct target_modify_ldt_ldt_s *target_ldt_info;
6296     int seg_32bit, contents, read_exec_only, limit_in_pages;
6297     int seg_not_present, useable, lm;
6298     uint32_t *lp, entry_1, entry_2;
6299     int i;
6300 
6301     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6302     if (!target_ldt_info)
6303         return -TARGET_EFAULT;
6304     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6305     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6306     ldt_info.limit = tswap32(target_ldt_info->limit);
6307     ldt_info.flags = tswap32(target_ldt_info->flags);
6308     if (ldt_info.entry_number == -1) {
6309         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6310             if (gdt_table[i] == 0) {
6311                 ldt_info.entry_number = i;
6312                 target_ldt_info->entry_number = tswap32(i);
6313                 break;
6314             }
6315         }
6316     }
6317     unlock_user_struct(target_ldt_info, ptr, 1);
6318 
6319     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6320         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6321            return -TARGET_EINVAL;
6322     seg_32bit = ldt_info.flags & 1;
6323     contents = (ldt_info.flags >> 1) & 3;
6324     read_exec_only = (ldt_info.flags >> 3) & 1;
6325     limit_in_pages = (ldt_info.flags >> 4) & 1;
6326     seg_not_present = (ldt_info.flags >> 5) & 1;
6327     useable = (ldt_info.flags >> 6) & 1;
6328 #ifdef TARGET_ABI32
6329     lm = 0;
6330 #else
6331     lm = (ldt_info.flags >> 7) & 1;
6332 #endif
6333 
6334     if (contents == 3) {
6335         if (seg_not_present == 0)
6336             return -TARGET_EINVAL;
6337     }
6338 
6339     /* NOTE: same code as Linux kernel */
6340     /* Allow LDTs to be cleared by the user. */
6341     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6342         if ((contents == 0             &&
6343              read_exec_only == 1       &&
6344              seg_32bit == 0            &&
6345              limit_in_pages == 0       &&
6346              seg_not_present == 1      &&
6347              useable == 0 )) {
6348             entry_1 = 0;
6349             entry_2 = 0;
6350             goto install;
6351         }
6352     }
6353 
6354     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6355         (ldt_info.limit & 0x0ffff);
6356     entry_2 = (ldt_info.base_addr & 0xff000000) |
6357         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6358         (ldt_info.limit & 0xf0000) |
6359         ((read_exec_only ^ 1) << 9) |
6360         (contents << 10) |
6361         ((seg_not_present ^ 1) << 15) |
6362         (seg_32bit << 22) |
6363         (limit_in_pages << 23) |
6364         (useable << 20) |
6365         (lm << 21) |
6366         0x7000;
6367 
6368     /* Install the new entry ...  */
6369 install:
6370     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6371     lp[0] = tswap32(entry_1);
6372     lp[1] = tswap32(entry_2);
6373     return 0;
6374 }
6375 
6376 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6377 {
6378     struct target_modify_ldt_ldt_s *target_ldt_info;
6379     uint64_t *gdt_table = g2h(env->gdt.base);
6380     uint32_t base_addr, limit, flags;
6381     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6382     int seg_not_present, useable, lm;
6383     uint32_t *lp, entry_1, entry_2;
6384 
6385     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6386     if (!target_ldt_info)
6387         return -TARGET_EFAULT;
6388     idx = tswap32(target_ldt_info->entry_number);
6389     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6390         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6391         unlock_user_struct(target_ldt_info, ptr, 1);
6392         return -TARGET_EINVAL;
6393     }
6394     lp = (uint32_t *)(gdt_table + idx);
6395     entry_1 = tswap32(lp[0]);
6396     entry_2 = tswap32(lp[1]);
6397 
6398     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6399     contents = (entry_2 >> 10) & 3;
6400     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6401     seg_32bit = (entry_2 >> 22) & 1;
6402     limit_in_pages = (entry_2 >> 23) & 1;
6403     useable = (entry_2 >> 20) & 1;
6404 #ifdef TARGET_ABI32
6405     lm = 0;
6406 #else
6407     lm = (entry_2 >> 21) & 1;
6408 #endif
6409     flags = (seg_32bit << 0) | (contents << 1) |
6410         (read_exec_only << 3) | (limit_in_pages << 4) |
6411         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6412     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6413     base_addr = (entry_1 >> 16) |
6414         (entry_2 & 0xff000000) |
6415         ((entry_2 & 0xff) << 16);
6416     target_ldt_info->base_addr = tswapal(base_addr);
6417     target_ldt_info->limit = tswap32(limit);
6418     target_ldt_info->flags = tswap32(flags);
6419     unlock_user_struct(target_ldt_info, ptr, 1);
6420     return 0;
6421 }
6422 #endif /* TARGET_I386 && TARGET_ABI32 */
6423 
6424 #ifndef TARGET_ABI32
6425 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6426 {
6427     abi_long ret = 0;
6428     abi_ulong val;
6429     int idx;
6430 
6431     switch(code) {
6432     case TARGET_ARCH_SET_GS:
6433     case TARGET_ARCH_SET_FS:
6434         if (code == TARGET_ARCH_SET_GS)
6435             idx = R_GS;
6436         else
6437             idx = R_FS;
6438         cpu_x86_load_seg(env, idx, 0);
6439         env->segs[idx].base = addr;
6440         break;
6441     case TARGET_ARCH_GET_GS:
6442     case TARGET_ARCH_GET_FS:
6443         if (code == TARGET_ARCH_GET_GS)
6444             idx = R_GS;
6445         else
6446             idx = R_FS;
6447         val = env->segs[idx].base;
6448         if (put_user(val, addr, abi_ulong))
6449             ret = -TARGET_EFAULT;
6450         break;
6451     default:
6452         ret = -TARGET_EINVAL;
6453         break;
6454     }
6455     return ret;
6456 }
6457 #endif
6458 
6459 #endif /* defined(TARGET_I386) */
6460 
6461 #define NEW_STACK_SIZE 0x40000
6462 
6463 
6464 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6465 typedef struct {
6466     CPUArchState *env;
6467     pthread_mutex_t mutex;
6468     pthread_cond_t cond;
6469     pthread_t thread;
6470     uint32_t tid;
6471     abi_ulong child_tidptr;
6472     abi_ulong parent_tidptr;
6473     sigset_t sigmask;
6474 } new_thread_info;
6475 
6476 static void *clone_func(void *arg)
6477 {
6478     new_thread_info *info = arg;
6479     CPUArchState *env;
6480     CPUState *cpu;
6481     TaskState *ts;
6482 
6483     rcu_register_thread();
6484     tcg_register_thread();
6485     env = info->env;
6486     cpu = ENV_GET_CPU(env);
6487     thread_cpu = cpu;
6488     ts = (TaskState *)cpu->opaque;
6489     info->tid = gettid();
6490     task_settid(ts);
6491     if (info->child_tidptr)
6492         put_user_u32(info->tid, info->child_tidptr);
6493     if (info->parent_tidptr)
6494         put_user_u32(info->tid, info->parent_tidptr);
6495     /* Enable signals.  */
6496     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6497     /* Signal to the parent that we're ready.  */
6498     pthread_mutex_lock(&info->mutex);
6499     pthread_cond_broadcast(&info->cond);
6500     pthread_mutex_unlock(&info->mutex);
6501     /* Wait until the parent has finished initializing the tls state.  */
6502     pthread_mutex_lock(&clone_lock);
6503     pthread_mutex_unlock(&clone_lock);
6504     cpu_loop(env);
6505     /* never exits */
6506     return NULL;
6507 }
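/* Start-up protocol between clone_func() and the CLONE_VM path of
 * do_fork() below: the parent holds clone_lock while it copies the CPU
 * state and spawns the thread with all signals blocked; the child
 * publishes its tid, signals info->cond to tell the parent it is running,
 * then blocks on clone_lock until the parent has finished setting up
 * (TLS etc.) before entering cpu_loop().  The tid returned through info
 * is what do_fork() hands back to the guest as the clone() result.  */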
6508 
6509 /* do_fork() must return host values and target errnos (unlike most
6510    do_*() functions). */
6511 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6512                    abi_ulong parent_tidptr, target_ulong newtls,
6513                    abi_ulong child_tidptr)
6514 {
6515     CPUState *cpu = ENV_GET_CPU(env);
6516     int ret;
6517     TaskState *ts;
6518     CPUState *new_cpu;
6519     CPUArchState *new_env;
6520     sigset_t sigmask;
6521 
6522     flags &= ~CLONE_IGNORED_FLAGS;
6523 
6524     /* Emulate vfork() with fork() */
6525     if (flags & CLONE_VFORK)
6526         flags &= ~(CLONE_VFORK | CLONE_VM);
6527 
6528     if (flags & CLONE_VM) {
6529         TaskState *parent_ts = (TaskState *)cpu->opaque;
6530         new_thread_info info;
6531         pthread_attr_t attr;
6532 
6533         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6534             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6535             return -TARGET_EINVAL;
6536         }
6537 
6538         ts = g_new0(TaskState, 1);
6539         init_task_state(ts);
6540 
6541         /* Grab a mutex so that thread setup appears atomic.  */
6542         pthread_mutex_lock(&clone_lock);
6543 
6544         /* we create a new CPU instance. */
6545         new_env = cpu_copy(env);
6546         /* Init regs that differ from the parent.  */
6547         cpu_clone_regs(new_env, newsp);
6548         new_cpu = ENV_GET_CPU(new_env);
6549         new_cpu->opaque = ts;
6550         ts->bprm = parent_ts->bprm;
6551         ts->info = parent_ts->info;
6552         ts->signal_mask = parent_ts->signal_mask;
6553 
6554         if (flags & CLONE_CHILD_CLEARTID) {
6555             ts->child_tidptr = child_tidptr;
6556         }
6557 
6558         if (flags & CLONE_SETTLS) {
6559             cpu_set_tls (new_env, newtls);
6560         }
6561 
6562         memset(&info, 0, sizeof(info));
6563         pthread_mutex_init(&info.mutex, NULL);
6564         pthread_mutex_lock(&info.mutex);
6565         pthread_cond_init(&info.cond, NULL);
6566         info.env = new_env;
6567         if (flags & CLONE_CHILD_SETTID) {
6568             info.child_tidptr = child_tidptr;
6569         }
6570         if (flags & CLONE_PARENT_SETTID) {
6571             info.parent_tidptr = parent_tidptr;
6572         }
6573 
6574         ret = pthread_attr_init(&attr);
6575         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6576         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6577         /* It is not safe to deliver signals until the child has finished
6578            initializing, so temporarily block all signals.  */
6579         sigfillset(&sigmask);
6580         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6581 
6582         /* If this is our first additional thread, we need to ensure we
6583          * generate code for parallel execution and flush old translations.
6584          */
6585         if (!parallel_cpus) {
6586             parallel_cpus = true;
6587             tb_flush(cpu);
6588         }
6589 
6590         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6591         /* TODO: Free new CPU state if thread creation failed.  */
6592 
6593         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6594         pthread_attr_destroy(&attr);
6595         if (ret == 0) {
6596             /* Wait for the child to initialize.  */
6597             pthread_cond_wait(&info.cond, &info.mutex);
6598             ret = info.tid;
6599         } else {
6600             ret = -1;
6601         }
6602         pthread_mutex_unlock(&info.mutex);
6603         pthread_cond_destroy(&info.cond);
6604         pthread_mutex_destroy(&info.mutex);
6605         pthread_mutex_unlock(&clone_lock);
6606     } else {
6607         /* if no CLONE_VM, we consider it a fork */
6608         if (flags & CLONE_INVALID_FORK_FLAGS) {
6609             return -TARGET_EINVAL;
6610         }
6611 
6612         /* We can't support custom termination signals */
6613         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6614             return -TARGET_EINVAL;
6615         }
6616 
6617         if (block_signals()) {
6618             return -TARGET_ERESTARTSYS;
6619         }
6620 
6621         fork_start();
6622         ret = fork();
6623         if (ret == 0) {
6624             /* Child Process.  */
6625             cpu_clone_regs(env, newsp);
6626             fork_end(1);
6627             /* There is a race condition here.  The parent process could
6628                theoretically read the TID in the child process before the child
6629                tid is set.  This would require using either ptrace
6630                (not implemented) or having *_tidptr point at a shared memory
6631                mapping.  We can't repeat the spinlock hack used above because
6632                the child process gets its own copy of the lock.  */
6633             if (flags & CLONE_CHILD_SETTID)
6634                 put_user_u32(gettid(), child_tidptr);
6635             if (flags & CLONE_PARENT_SETTID)
6636                 put_user_u32(gettid(), parent_tidptr);
6637             ts = (TaskState *)cpu->opaque;
6638             if (flags & CLONE_SETTLS)
6639                 cpu_set_tls (env, newtls);
6640             if (flags & CLONE_CHILD_CLEARTID)
6641                 ts->child_tidptr = child_tidptr;
6642         } else {
6643             fork_end(0);
6644         }
6645     }
6646     return ret;
6647 }
6648 
6649 /* warning: doesn't handle Linux-specific flags... */
6650 static int target_to_host_fcntl_cmd(int cmd)
6651 {
6652     int ret;
6653 
6654     switch(cmd) {
6655     case TARGET_F_DUPFD:
6656     case TARGET_F_GETFD:
6657     case TARGET_F_SETFD:
6658     case TARGET_F_GETFL:
6659     case TARGET_F_SETFL:
6660         ret = cmd;
6661         break;
6662     case TARGET_F_GETLK:
6663         ret = F_GETLK64;
6664         break;
6665     case TARGET_F_SETLK:
6666         ret = F_SETLK64;
6667         break;
6668     case TARGET_F_SETLKW:
6669         ret = F_SETLKW64;
6670         break;
6671     case TARGET_F_GETOWN:
6672         ret = F_GETOWN;
6673         break;
6674     case TARGET_F_SETOWN:
6675         ret = F_SETOWN;
6676         break;
6677     case TARGET_F_GETSIG:
6678         ret = F_GETSIG;
6679         break;
6680     case TARGET_F_SETSIG:
6681         ret = F_SETSIG;
6682         break;
6683 #if TARGET_ABI_BITS == 32
6684     case TARGET_F_GETLK64:
6685         ret = F_GETLK64;
6686         break;
6687     case TARGET_F_SETLK64:
6688         ret = F_SETLK64;
6689         break;
6690     case TARGET_F_SETLKW64:
6691         ret = F_SETLKW64;
6692         break;
6693 #endif
6694     case TARGET_F_SETLEASE:
6695         ret = F_SETLEASE;
6696         break;
6697     case TARGET_F_GETLEASE:
6698         ret = F_GETLEASE;
6699         break;
6700 #ifdef F_DUPFD_CLOEXEC
6701     case TARGET_F_DUPFD_CLOEXEC:
6702         ret = F_DUPFD_CLOEXEC;
6703         break;
6704 #endif
6705     case TARGET_F_NOTIFY:
6706         ret = F_NOTIFY;
6707         break;
6708 #ifdef F_GETOWN_EX
6709     case TARGET_F_GETOWN_EX:
6710         ret = F_GETOWN_EX;
6711         break;
6712 #endif
6713 #ifdef F_SETOWN_EX
6714     case TARGET_F_SETOWN_EX:
6715         ret = F_SETOWN_EX;
6716         break;
6717 #endif
6718 #ifdef F_SETPIPE_SZ
6719     case TARGET_F_SETPIPE_SZ:
6720         ret = F_SETPIPE_SZ;
6721         break;
6722     case TARGET_F_GETPIPE_SZ:
6723         ret = F_GETPIPE_SZ;
6724         break;
6725 #endif
6726     default:
6727         ret = -TARGET_EINVAL;
6728         break;
6729     }
6730 
6731 #if defined(__powerpc64__)
6732     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6733      * 14, which the kernel does not support. The glibc fcntl call actually
6734      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6735      * the syscall directly, adjust to what the kernel supports.
6736      */
6737     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6738         ret -= F_GETLK64 - 5;
6739     }
6740 #endif
6741 
6742     return ret;
6743 }
6744 
6745 #define FLOCK_TRANSTBL \
6746     switch (type) { \
6747     TRANSTBL_CONVERT(F_RDLCK); \
6748     TRANSTBL_CONVERT(F_WRLCK); \
6749     TRANSTBL_CONVERT(F_UNLCK); \
6750     TRANSTBL_CONVERT(F_EXLCK); \
6751     TRANSTBL_CONVERT(F_SHLCK); \
6752     }
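/* FLOCK_TRANSTBL is an X-macro: the two functions below expand the same
 * switch body with TRANSTBL_CONVERT defined in opposite directions, so
 * target_to_host_flock(TARGET_F_RDLCK) yields F_RDLCK and
 * host_to_target_flock(F_RDLCK) yields TARGET_F_RDLCK.  */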
6753 
6754 static int target_to_host_flock(int type)
6755 {
6756 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6757     FLOCK_TRANSTBL
6758 #undef  TRANSTBL_CONVERT
6759     return -TARGET_EINVAL;
6760 }
6761 
6762 static int host_to_target_flock(int type)
6763 {
6764 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6765     FLOCK_TRANSTBL
6766 #undef  TRANSTBL_CONVERT
6767     /* if we don't know how to convert the value coming
6768      * from the host, we copy it to the target field as-is
6769      */
6770     return type;
6771 }
6772 
6773 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6774                                             abi_ulong target_flock_addr)
6775 {
6776     struct target_flock *target_fl;
6777     int l_type;
6778 
6779     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6780         return -TARGET_EFAULT;
6781     }
6782 
6783     __get_user(l_type, &target_fl->l_type);
6784     l_type = target_to_host_flock(l_type);
6785     if (l_type < 0) {
6786         return l_type;
6787     }
6788     fl->l_type = l_type;
6789     __get_user(fl->l_whence, &target_fl->l_whence);
6790     __get_user(fl->l_start, &target_fl->l_start);
6791     __get_user(fl->l_len, &target_fl->l_len);
6792     __get_user(fl->l_pid, &target_fl->l_pid);
6793     unlock_user_struct(target_fl, target_flock_addr, 0);
6794     return 0;
6795 }
6796 
6797 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6798                                           const struct flock64 *fl)
6799 {
6800     struct target_flock *target_fl;
6801     short l_type;
6802 
6803     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6804         return -TARGET_EFAULT;
6805     }
6806 
6807     l_type = host_to_target_flock(fl->l_type);
6808     __put_user(l_type, &target_fl->l_type);
6809     __put_user(fl->l_whence, &target_fl->l_whence);
6810     __put_user(fl->l_start, &target_fl->l_start);
6811     __put_user(fl->l_len, &target_fl->l_len);
6812     __put_user(fl->l_pid, &target_fl->l_pid);
6813     unlock_user_struct(target_fl, target_flock_addr, 1);
6814     return 0;
6815 }
6816 
6817 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6818 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6819 
6820 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6821 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6822                                                    abi_ulong target_flock_addr)
6823 {
6824     struct target_oabi_flock64 *target_fl;
6825     int l_type;
6826 
6827     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6828         return -TARGET_EFAULT;
6829     }
6830 
6831     __get_user(l_type, &target_fl->l_type);
6832     l_type = target_to_host_flock(l_type);
6833     if (l_type < 0) {
6834         return l_type;
6835     }
6836     fl->l_type = l_type;
6837     __get_user(fl->l_whence, &target_fl->l_whence);
6838     __get_user(fl->l_start, &target_fl->l_start);
6839     __get_user(fl->l_len, &target_fl->l_len);
6840     __get_user(fl->l_pid, &target_fl->l_pid);
6841     unlock_user_struct(target_fl, target_flock_addr, 0);
6842     return 0;
6843 }
6844 
6845 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6846                                                  const struct flock64 *fl)
6847 {
6848     struct target_oabi_flock64 *target_fl;
6849     short l_type;
6850 
6851     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6852         return -TARGET_EFAULT;
6853     }
6854 
6855     l_type = host_to_target_flock(fl->l_type);
6856     __put_user(l_type, &target_fl->l_type);
6857     __put_user(fl->l_whence, &target_fl->l_whence);
6858     __put_user(fl->l_start, &target_fl->l_start);
6859     __put_user(fl->l_len, &target_fl->l_len);
6860     __put_user(fl->l_pid, &target_fl->l_pid);
6861     unlock_user_struct(target_fl, target_flock_addr, 1);
6862     return 0;
6863 }
6864 #endif
6865 
6866 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6867                                               abi_ulong target_flock_addr)
6868 {
6869     struct target_flock64 *target_fl;
6870     int l_type;
6871 
6872     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6873         return -TARGET_EFAULT;
6874     }
6875 
6876     __get_user(l_type, &target_fl->l_type);
6877     l_type = target_to_host_flock(l_type);
6878     if (l_type < 0) {
6879         return l_type;
6880     }
6881     fl->l_type = l_type;
6882     __get_user(fl->l_whence, &target_fl->l_whence);
6883     __get_user(fl->l_start, &target_fl->l_start);
6884     __get_user(fl->l_len, &target_fl->l_len);
6885     __get_user(fl->l_pid, &target_fl->l_pid);
6886     unlock_user_struct(target_fl, target_flock_addr, 0);
6887     return 0;
6888 }
6889 
6890 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6891                                             const struct flock64 *fl)
6892 {
6893     struct target_flock64 *target_fl;
6894     short l_type;
6895 
6896     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6897         return -TARGET_EFAULT;
6898     }
6899 
6900     l_type = host_to_target_flock(fl->l_type);
6901     __put_user(l_type, &target_fl->l_type);
6902     __put_user(fl->l_whence, &target_fl->l_whence);
6903     __put_user(fl->l_start, &target_fl->l_start);
6904     __put_user(fl->l_len, &target_fl->l_len);
6905     __put_user(fl->l_pid, &target_fl->l_pid);
6906     unlock_user_struct(target_fl, target_flock_addr, 1);
6907     return 0;
6908 }
6909 
6910 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6911 {
6912     struct flock64 fl64;
6913 #ifdef F_GETOWN_EX
6914     struct f_owner_ex fox;
6915     struct target_f_owner_ex *target_fox;
6916 #endif
6917     abi_long ret;
6918     int host_cmd = target_to_host_fcntl_cmd(cmd);
6919 
6920     if (host_cmd == -TARGET_EINVAL)
6921         return host_cmd;
6922 
6923     switch(cmd) {
6924     case TARGET_F_GETLK:
6925         ret = copy_from_user_flock(&fl64, arg);
6926         if (ret) {
6927             return ret;
6928         }
6929         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6930         if (ret == 0) {
6931             ret = copy_to_user_flock(arg, &fl64);
6932         }
6933         break;
6934 
6935     case TARGET_F_SETLK:
6936     case TARGET_F_SETLKW:
6937         ret = copy_from_user_flock(&fl64, arg);
6938         if (ret) {
6939             return ret;
6940         }
6941         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6942         break;
6943 
6944     case TARGET_F_GETLK64:
6945         ret = copy_from_user_flock64(&fl64, arg);
6946         if (ret) {
6947             return ret;
6948         }
6949         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6950         if (ret == 0) {
6951             ret = copy_to_user_flock64(arg, &fl64);
6952         }
6953         break;
6954     case TARGET_F_SETLK64:
6955     case TARGET_F_SETLKW64:
6956         ret = copy_from_user_flock64(&fl64, arg);
6957         if (ret) {
6958             return ret;
6959         }
6960         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6961         break;
6962 
6963     case TARGET_F_GETFL:
6964         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6965         if (ret >= 0) {
6966             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6967         }
6968         break;
6969 
6970     case TARGET_F_SETFL:
6971         ret = get_errno(safe_fcntl(fd, host_cmd,
6972                                    target_to_host_bitmask(arg,
6973                                                           fcntl_flags_tbl)));
6974         break;
6975 
6976 #ifdef F_GETOWN_EX
6977     case TARGET_F_GETOWN_EX:
6978         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6979         if (ret >= 0) {
6980             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6981                 return -TARGET_EFAULT;
6982             target_fox->type = tswap32(fox.type);
6983             target_fox->pid = tswap32(fox.pid);
6984             unlock_user_struct(target_fox, arg, 1);
6985         }
6986         break;
6987 #endif
6988 
6989 #ifdef F_SETOWN_EX
6990     case TARGET_F_SETOWN_EX:
6991         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6992             return -TARGET_EFAULT;
6993         fox.type = tswap32(target_fox->type);
6994         fox.pid = tswap32(target_fox->pid);
6995         unlock_user_struct(target_fox, arg, 0);
6996         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6997         break;
6998 #endif
6999 
7000     case TARGET_F_SETOWN:
7001     case TARGET_F_GETOWN:
7002     case TARGET_F_SETSIG:
7003     case TARGET_F_GETSIG:
7004     case TARGET_F_SETLEASE:
7005     case TARGET_F_GETLEASE:
7006     case TARGET_F_SETPIPE_SZ:
7007     case TARGET_F_GETPIPE_SZ:
7008         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7009         break;
7010 
7011     default:
7012         ret = get_errno(safe_fcntl(fd, cmd, arg));
7013         break;
7014     }
7015     return ret;
7016 }
7017 
7018 #ifdef USE_UID16
7019 
7020 static inline int high2lowuid(int uid)
7021 {
7022     if (uid > 65535)
7023         return 65534;
7024     else
7025         return uid;
7026 }
7027 
7028 static inline int high2lowgid(int gid)
7029 {
7030     if (gid > 65535)
7031         return 65534;
7032     else
7033         return gid;
7034 }
7035 
7036 static inline int low2highuid(int uid)
7037 {
7038     if ((int16_t)uid == -1)
7039         return -1;
7040     else
7041         return uid;
7042 }
7043 
7044 static inline int low2highgid(int gid)
7045 {
7046     if ((int16_t)gid == -1)
7047         return -1;
7048     else
7049         return gid;
7050 }
7051 static inline int tswapid(int id)
7052 {
7053     return tswap16(id);
7054 }
7055 
7056 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7057 
7058 #else /* !USE_UID16 */
7059 static inline int high2lowuid(int uid)
7060 {
7061     return uid;
7062 }
7063 static inline int high2lowgid(int gid)
7064 {
7065     return gid;
7066 }
7067 static inline int low2highuid(int uid)
7068 {
7069     return uid;
7070 }
7071 static inline int low2highgid(int gid)
7072 {
7073     return gid;
7074 }
7075 static inline int tswapid(int id)
7076 {
7077     return tswap32(id);
7078 }
7079 
7080 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7081 
7082 #endif /* USE_UID16 */
7083 
7084 /* We must do direct syscalls for setting UID/GID, because we want to
7085  * implement the Linux system call semantics of "change only for this thread",
7086  * not the libc/POSIX semantics of "change for all threads in process".
7087  * (See http://ewontfix.com/17/ for more details.)
7088  * We use the 32-bit version of the syscalls if present; if it is not
7089  * then either the host architecture supports 32-bit UIDs natively with
7090  * the standard syscall, or the 16-bit UID is the best we can do.
7091  */
7092 #ifdef __NR_setuid32
7093 #define __NR_sys_setuid __NR_setuid32
7094 #else
7095 #define __NR_sys_setuid __NR_setuid
7096 #endif
7097 #ifdef __NR_setgid32
7098 #define __NR_sys_setgid __NR_setgid32
7099 #else
7100 #define __NR_sys_setgid __NR_setgid
7101 #endif
7102 #ifdef __NR_setresuid32
7103 #define __NR_sys_setresuid __NR_setresuid32
7104 #else
7105 #define __NR_sys_setresuid __NR_setresuid
7106 #endif
7107 #ifdef __NR_setresgid32
7108 #define __NR_sys_setresgid __NR_setresgid32
7109 #else
7110 #define __NR_sys_setresgid __NR_setresgid
7111 #endif
7112 
7113 _syscall1(int, sys_setuid, uid_t, uid)
7114 _syscall1(int, sys_setgid, gid_t, gid)
7115 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7116 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
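/* The _syscallN() wrappers (defined earlier in this file) invoke the raw
 * syscall number directly instead of going through libc, so for example
 * sys_setuid(uid) changes the UID of the calling thread only, which is
 * the per-thread kernel semantics described in the comment above.  */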
7117 
7118 void syscall_init(void)
7119 {
7120     IOCTLEntry *ie;
7121     const argtype *arg_type;
7122     int size;
7123     int i;
7124 
7125     thunk_init(STRUCT_MAX);
7126 
7127 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7128 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7129 #include "syscall_types.h"
7130 #undef STRUCT
7131 #undef STRUCT_SPECIAL
7132 
7133     /* Build target_to_host_errno_table[] from
7134      * host_to_target_errno_table[]. */
7135     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7136         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7137     }
7138 
7139     /* We patch the ioctl size if necessary. We rely on the fact that
7140        no ioctl has all bits set to '1' in its size field. */
7141     ie = ioctl_entries;
7142     while (ie->target_cmd != 0) {
7143         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7144             TARGET_IOC_SIZEMASK) {
7145             arg_type = ie->arg_type;
7146             if (arg_type[0] != TYPE_PTR) {
7147                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7148                         ie->target_cmd);
7149                 exit(1);
7150             }
7151             arg_type++;
7152             size = thunk_type_size(arg_type, 0);
7153             ie->target_cmd = (ie->target_cmd &
7154                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7155                 (size << TARGET_IOC_SIZESHIFT);
7156         }
7157 
7158         /* automatic consistency check if same arch */
7159 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7160     (defined(__x86_64__) && defined(TARGET_X86_64))
7161         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7162             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7163                     ie->name, ie->target_cmd, ie->host_cmd);
7164         }
7165 #endif
7166         ie++;
7167     }
7168 }
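/* Example of the size patching above: an ioctl table entry declared with
 * all size bits set (TARGET_IOC_SIZEMASK) gets the thunk_type_size() of
 * its pointed-to struct substituted into target_cmd at start-up, so the
 * table key matches the command number the guest actually passes.  */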
7169 
7170 #if TARGET_ABI_BITS == 32
7171 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
7172 {
7173 #ifdef TARGET_WORDS_BIGENDIAN
7174     return ((uint64_t)word0 << 32) | word1;
7175 #else
7176     return ((uint64_t)word1 << 32) | word0;
7177 #endif
7178 }
7179 #else /* TARGET_ABI_BITS == 32 */
7180 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
7181 {
7182     return word0;
7183 }
7184 #endif /* TARGET_ABI_BITS != 32 */
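/* target_offset64() reassembles a 64-bit value that 32-bit guest ABIs
 * pass as a register pair.  For instance, on a 32-bit big-endian target
 * the offset 0x0000000100000000 arrives as word0 = 0x00000001 and
 * word1 = 0x00000000; little-endian targets pass the words the other way
 * round, and 64-bit ABIs already carry the full value in word0.  */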
7185 
7186 #ifdef TARGET_NR_truncate64
7187 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7188                                          abi_long arg2,
7189                                          abi_long arg3,
7190                                          abi_long arg4)
7191 {
7192     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7193         arg2 = arg3;
7194         arg3 = arg4;
7195     }
7196     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7197 }
7198 #endif
7199 
7200 #ifdef TARGET_NR_ftruncate64
7201 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7202                                           abi_long arg2,
7203                                           abi_long arg3,
7204                                           abi_long arg4)
7205 {
7206     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7207         arg2 = arg3;
7208         arg3 = arg4;
7209     }
7210     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7211 }
7212 #endif
7213 
7214 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7215                                                abi_ulong target_addr)
7216 {
7217     struct target_timespec *target_ts;
7218 
7219     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7220         return -TARGET_EFAULT;
7221     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7222     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7223     unlock_user_struct(target_ts, target_addr, 0);
7224     return 0;
7225 }
7226 
7227 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7228                                                struct timespec *host_ts)
7229 {
7230     struct target_timespec *target_ts;
7231 
7232     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7233         return -TARGET_EFAULT;
7234     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7235     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7236     unlock_user_struct(target_ts, target_addr, 1);
7237     return 0;
7238 }
7239 
7240 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7241                                                  abi_ulong target_addr)
7242 {
7243     struct target_itimerspec *target_itspec;
7244 
7245     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7246         return -TARGET_EFAULT;
7247     }
7248 
7249     host_itspec->it_interval.tv_sec =
7250                             tswapal(target_itspec->it_interval.tv_sec);
7251     host_itspec->it_interval.tv_nsec =
7252                             tswapal(target_itspec->it_interval.tv_nsec);
7253     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7254     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7255 
7256     unlock_user_struct(target_itspec, target_addr, 1);
7257     return 0;
7258 }
7259 
7260 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7261                                                struct itimerspec *host_its)
7262 {
7263     struct target_itimerspec *target_itspec;
7264 
7265     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7266         return -TARGET_EFAULT;
7267     }
7268 
7269     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7270     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7271 
7272     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7273     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7274 
7275     unlock_user_struct(target_itspec, target_addr, 0);
7276     return 0;
7277 }
7278 
7279 static inline abi_long target_to_host_timex(struct timex *host_tx,
7280                                             abi_long target_addr)
7281 {
7282     struct target_timex *target_tx;
7283 
7284     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7285         return -TARGET_EFAULT;
7286     }
7287 
7288     __get_user(host_tx->modes, &target_tx->modes);
7289     __get_user(host_tx->offset, &target_tx->offset);
7290     __get_user(host_tx->freq, &target_tx->freq);
7291     __get_user(host_tx->maxerror, &target_tx->maxerror);
7292     __get_user(host_tx->esterror, &target_tx->esterror);
7293     __get_user(host_tx->status, &target_tx->status);
7294     __get_user(host_tx->constant, &target_tx->constant);
7295     __get_user(host_tx->precision, &target_tx->precision);
7296     __get_user(host_tx->tolerance, &target_tx->tolerance);
7297     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7298     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7299     __get_user(host_tx->tick, &target_tx->tick);
7300     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7301     __get_user(host_tx->jitter, &target_tx->jitter);
7302     __get_user(host_tx->shift, &target_tx->shift);
7303     __get_user(host_tx->stabil, &target_tx->stabil);
7304     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7305     __get_user(host_tx->calcnt, &target_tx->calcnt);
7306     __get_user(host_tx->errcnt, &target_tx->errcnt);
7307     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7308     __get_user(host_tx->tai, &target_tx->tai);
7309 
7310     unlock_user_struct(target_tx, target_addr, 0);
7311     return 0;
7312 }
7313 
7314 static inline abi_long host_to_target_timex(abi_long target_addr,
7315                                             struct timex *host_tx)
7316 {
7317     struct target_timex *target_tx;
7318 
7319     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7320         return -TARGET_EFAULT;
7321     }
7322 
7323     __put_user(host_tx->modes, &target_tx->modes);
7324     __put_user(host_tx->offset, &target_tx->offset);
7325     __put_user(host_tx->freq, &target_tx->freq);
7326     __put_user(host_tx->maxerror, &target_tx->maxerror);
7327     __put_user(host_tx->esterror, &target_tx->esterror);
7328     __put_user(host_tx->status, &target_tx->status);
7329     __put_user(host_tx->constant, &target_tx->constant);
7330     __put_user(host_tx->precision, &target_tx->precision);
7331     __put_user(host_tx->tolerance, &target_tx->tolerance);
7332     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7333     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7334     __put_user(host_tx->tick, &target_tx->tick);
7335     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7336     __put_user(host_tx->jitter, &target_tx->jitter);
7337     __put_user(host_tx->shift, &target_tx->shift);
7338     __put_user(host_tx->stabil, &target_tx->stabil);
7339     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7340     __put_user(host_tx->calcnt, &target_tx->calcnt);
7341     __put_user(host_tx->errcnt, &target_tx->errcnt);
7342     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7343     __put_user(host_tx->tai, &target_tx->tai);
7344 
7345     unlock_user_struct(target_tx, target_addr, 1);
7346     return 0;
7347 }
7348 
7349 
7350 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7351                                                abi_ulong target_addr)
7352 {
7353     struct target_sigevent *target_sevp;
7354 
7355     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7356         return -TARGET_EFAULT;
7357     }
7358 
7359     /* This union is awkward on 64 bit systems because it has a 32 bit
7360      * integer and a pointer in it; we follow the conversion approach
7361      * used for handling sigval types in signal.c so the guest should get
7362      * the correct value back even if we did a 64 bit byteswap and it's
7363      * using the 32 bit integer.
7364      */
7365     host_sevp->sigev_value.sival_ptr =
7366         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7367     host_sevp->sigev_signo =
7368         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7369     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7370     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7371 
7372     unlock_user_struct(target_sevp, target_addr, 1);
7373     return 0;
7374 }
7375 
7376 #if defined(TARGET_NR_mlockall)
7377 static inline int target_to_host_mlockall_arg(int arg)
7378 {
7379     int result = 0;
7380 
7381     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7382         result |= MCL_CURRENT;
7383     }
7384     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7385         result |= MCL_FUTURE;
7386     }
7387     return result;
7388 }
7389 #endif
7390 
7391 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7392      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7393      defined(TARGET_NR_newfstatat))
7394 static inline abi_long host_to_target_stat64(void *cpu_env,
7395                                              abi_ulong target_addr,
7396                                              struct stat *host_st)
7397 {
7398 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7399     if (((CPUARMState *)cpu_env)->eabi) {
7400         struct target_eabi_stat64 *target_st;
7401 
7402         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7403             return -TARGET_EFAULT;
7404         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7405         __put_user(host_st->st_dev, &target_st->st_dev);
7406         __put_user(host_st->st_ino, &target_st->st_ino);
7407 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7408         __put_user(host_st->st_ino, &target_st->__st_ino);
7409 #endif
7410         __put_user(host_st->st_mode, &target_st->st_mode);
7411         __put_user(host_st->st_nlink, &target_st->st_nlink);
7412         __put_user(host_st->st_uid, &target_st->st_uid);
7413         __put_user(host_st->st_gid, &target_st->st_gid);
7414         __put_user(host_st->st_rdev, &target_st->st_rdev);
7415         __put_user(host_st->st_size, &target_st->st_size);
7416         __put_user(host_st->st_blksize, &target_st->st_blksize);
7417         __put_user(host_st->st_blocks, &target_st->st_blocks);
7418         __put_user(host_st->st_atime, &target_st->target_st_atime);
7419         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7420         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7421         unlock_user_struct(target_st, target_addr, 1);
7422     } else
7423 #endif
7424     {
7425 #if defined(TARGET_HAS_STRUCT_STAT64)
7426         struct target_stat64 *target_st;
7427 #else
7428         struct target_stat *target_st;
7429 #endif
7430 
7431         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7432             return -TARGET_EFAULT;
7433         memset(target_st, 0, sizeof(*target_st));
7434         __put_user(host_st->st_dev, &target_st->st_dev);
7435         __put_user(host_st->st_ino, &target_st->st_ino);
7436 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7437         __put_user(host_st->st_ino, &target_st->__st_ino);
7438 #endif
7439         __put_user(host_st->st_mode, &target_st->st_mode);
7440         __put_user(host_st->st_nlink, &target_st->st_nlink);
7441         __put_user(host_st->st_uid, &target_st->st_uid);
7442         __put_user(host_st->st_gid, &target_st->st_gid);
7443         __put_user(host_st->st_rdev, &target_st->st_rdev);
7444         /* XXX: better use of kernel struct */
7445         __put_user(host_st->st_size, &target_st->st_size);
7446         __put_user(host_st->st_blksize, &target_st->st_blksize);
7447         __put_user(host_st->st_blocks, &target_st->st_blocks);
7448         __put_user(host_st->st_atime, &target_st->target_st_atime);
7449         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7450         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7451         unlock_user_struct(target_st, target_addr, 1);
7452     }
7453 
7454     return 0;
7455 }
7456 #endif
7457 
7458 /* ??? Using host futex calls even when target atomic operations
7459    are not really atomic probably breaks things.  However, implementing
7460    futexes locally would make futexes shared between multiple processes
7461    tricky.  On the other hand, they're probably useless in that case
7462    because guest atomic operations won't work either.  */
7463 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7464                     target_ulong uaddr2, int val3)
7465 {
7466     struct timespec ts, *pts;
7467     int base_op;
7468 
7469     /* ??? We assume FUTEX_* constants are the same on both host
7470        and target.  */
7471 #ifdef FUTEX_CMD_MASK
7472     base_op = op & FUTEX_CMD_MASK;
7473 #else
7474     base_op = op;
7475 #endif
7476     switch (base_op) {
7477     case FUTEX_WAIT:
7478     case FUTEX_WAIT_BITSET:
7479         if (timeout) {
7480             pts = &ts;
7481             target_to_host_timespec(pts, timeout);
7482         } else {
7483             pts = NULL;
7484         }
7485         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7486                          pts, NULL, val3));
7487     case FUTEX_WAKE:
7488         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7489     case FUTEX_FD:
7490         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7491     case FUTEX_REQUEUE:
7492     case FUTEX_CMP_REQUEUE:
7493     case FUTEX_WAKE_OP:
7494         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7495            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7496            But the prototype takes a `struct timespec *'; insert casts
7497            to satisfy the compiler.  We do not need to tswap TIMEOUT
7498            since it's not compared to guest memory.  */
7499         pts = (struct timespec *)(uintptr_t) timeout;
7500         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7501                                     g2h(uaddr2),
7502                                     (base_op == FUTEX_CMP_REQUEUE
7503                                      ? tswap32(val3)
7504                                      : val3)));
7505     default:
7506         return -TARGET_ENOSYS;
7507     }
7508 }
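/* Note on the byteswaps above: FUTEX_WAIT's val and FUTEX_CMP_REQUEUE's
 * val3 are compared by the kernel against the guest's futex word in
 * memory, so they must be converted to guest byte order; plain counts
 * such as the number of waiters to wake are used as-is.  */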
7509 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7510 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7511                                      abi_long handle, abi_long mount_id,
7512                                      abi_long flags)
7513 {
7514     struct file_handle *target_fh;
7515     struct file_handle *fh;
7516     int mid = 0;
7517     abi_long ret;
7518     char *name;
7519     unsigned int size, total_size;
7520 
7521     if (get_user_s32(size, handle)) {
7522         return -TARGET_EFAULT;
7523     }
7524 
7525     name = lock_user_string(pathname);
7526     if (!name) {
7527         return -TARGET_EFAULT;
7528     }
7529 
7530     total_size = sizeof(struct file_handle) + size;
7531     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7532     if (!target_fh) {
7533         unlock_user(name, pathname, 0);
7534         return -TARGET_EFAULT;
7535     }
7536 
7537     fh = g_malloc0(total_size);
7538     fh->handle_bytes = size;
7539 
7540     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7541     unlock_user(name, pathname, 0);
7542 
7543     /* man name_to_handle_at(2):
7544      * Other than the use of the handle_bytes field, the caller should treat
7545      * the file_handle structure as an opaque data type
7546      */
7547 
7548     memcpy(target_fh, fh, total_size);
7549     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7550     target_fh->handle_type = tswap32(fh->handle_type);
7551     g_free(fh);
7552     unlock_user(target_fh, handle, total_size);
7553 
7554     if (put_user_s32(mid, mount_id)) {
7555         return -TARGET_EFAULT;
7556     }
7557 
7558     return ret;
7559 
7560 }
7561 #endif
7562 
7563 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7564 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7565                                      abi_long flags)
7566 {
7567     struct file_handle *target_fh;
7568     struct file_handle *fh;
7569     unsigned int size, total_size;
7570     abi_long ret;
7571 
7572     if (get_user_s32(size, handle)) {
7573         return -TARGET_EFAULT;
7574     }
7575 
7576     total_size = sizeof(struct file_handle) + size;
7577     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7578     if (!target_fh) {
7579         return -TARGET_EFAULT;
7580     }
7581 
7582     fh = g_memdup(target_fh, total_size);
7583     fh->handle_bytes = size;
7584     fh->handle_type = tswap32(target_fh->handle_type);
7585 
7586     ret = get_errno(open_by_handle_at(mount_fd, fh,
7587                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7588 
7589     g_free(fh);
7590 
7591     unlock_user(target_fh, handle, total_size);
7592 
7593     return ret;
7594 }
7595 #endif
7596 
7597 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7598 
7599 /* signalfd siginfo conversion */
7600 
7601 static void
7602 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7603                                 const struct signalfd_siginfo *info)
7604 {
7605     int sig = host_to_target_signal(info->ssi_signo);
7606 
7607     /* linux/signalfd.h defines an ssi_addr_lsb field that is
7608      * not defined in sys/signalfd.h but is used by some kernels
7609      */
7610 
7611 #ifdef BUS_MCEERR_AO
7612     if (tinfo->ssi_signo == SIGBUS &&
7613         (tinfo->ssi_code == BUS_MCEERR_AR ||
7614          tinfo->ssi_code == BUS_MCEERR_AO)) {
7615         uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7616         uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7617         *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7618     }
7619 #endif
7620 
7621     tinfo->ssi_signo = tswap32(sig);
7622     tinfo->ssi_errno = tswap32(info->ssi_errno);
7623     tinfo->ssi_code = tswap32(info->ssi_code);
7624     tinfo->ssi_pid = tswap32(info->ssi_pid);
7625     tinfo->ssi_uid = tswap32(info->ssi_uid);
7626     tinfo->ssi_fd = tswap32(info->ssi_fd);
7627     tinfo->ssi_tid = tswap32(info->ssi_tid);
7628     tinfo->ssi_band = tswap32(info->ssi_band);
7629     tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7630     tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7631     tinfo->ssi_status = tswap32(info->ssi_status);
7632     tinfo->ssi_int = tswap32(info->ssi_int);
7633     tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7634     tinfo->ssi_utime = tswap64(info->ssi_utime);
7635     tinfo->ssi_stime = tswap64(info->ssi_stime);
7636     tinfo->ssi_addr = tswap64(info->ssi_addr);
7637 }
7638 
7639 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7640 {
7641     int i;
7642 
7643     for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7644         host_to_target_signalfd_siginfo(buf + i, buf + i);
7645     }
7646 
7647     return len;
7648 }
7649 
7650 static TargetFdTrans target_signalfd_trans = {
7651     .host_to_target_data = host_to_target_data_signalfd,
7652 };
7653 
7654 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7655 {
7656     int host_flags;
7657     target_sigset_t *target_mask;
7658     sigset_t host_mask;
7659     abi_long ret;
7660 
7661     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7662         return -TARGET_EINVAL;
7663     }
7664     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7665         return -TARGET_EFAULT;
7666     }
7667 
7668     target_to_host_sigset(&host_mask, target_mask);
7669 
7670     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7671 
7672     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7673     if (ret >= 0) {
7674         fd_trans_register(ret, &target_signalfd_trans);
7675     }
7676 
7677     unlock_user_struct(target_mask, mask, 0);
7678 
7679     return ret;
7680 }
7681 #endif
7682 
7683 /* Map host to target signal numbers for the wait family of syscalls.
7684    Assume all other status bits are the same.  */
7685 int host_to_target_waitstatus(int status)
7686 {
7687     if (WIFSIGNALED(status)) {
7688         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7689     }
7690     if (WIFSTOPPED(status)) {
7691         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7692                | (status & 0xff);
7693     }
7694     return status;
7695 }
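/* Layout of the wait status word (identical on host and target): bits 0-6
 * hold the terminating signal (0x7f means stopped), bit 7 the core-dump
 * flag, and bits 8-15 the exit code or the stop signal.  Only the signal
 * numbers need translating, hence the two masks above.  */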
7696 
7697 static int open_self_cmdline(void *cpu_env, int fd)
7698 {
7699     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7700     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7701     int i;
7702 
7703     for (i = 0; i < bprm->argc; i++) {
7704         size_t len = strlen(bprm->argv[i]) + 1;
7705 
7706         if (write(fd, bprm->argv[i], len) != len) {
7707             return -1;
7708         }
7709     }
7710 
7711     return 0;
7712 }
7713 
7714 static int open_self_maps(void *cpu_env, int fd)
7715 {
7716     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7717     TaskState *ts = cpu->opaque;
7718     FILE *fp;
7719     char *line = NULL;
7720     size_t len = 0;
7721     ssize_t read;
7722 
7723     fp = fopen("/proc/self/maps", "r");
7724     if (fp == NULL) {
7725         return -1;
7726     }
7727 
7728     while ((read = getline(&line, &len, fp)) != -1) {
7729         int fields, dev_maj, dev_min, inode;
7730         uint64_t min, max, offset;
7731         char flag_r, flag_w, flag_x, flag_p;
7732         char path[512] = "";
7733         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7734                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7735                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7736 
7737         if ((fields < 10) || (fields > 11)) {
7738             continue;
7739         }
7740         if (h2g_valid(min)) {
7741             int flags = page_get_flags(h2g(min));
7742             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7743             if (page_check_range(h2g(min), max - min, flags) == -1) {
7744                 continue;
7745             }
7746             if (h2g(min) == ts->info->stack_limit) {
7747                 pstrcpy(path, sizeof(path), "      [stack]");
7748             }
7749             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7750                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7751                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7752                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7753                     path[0] ? "         " : "", path);
7754         }
7755     }
7756 
7757     free(line);
7758     fclose(fp);
7759 
7760     return 0;
7761 }
7762 
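/* Synthesize /proc/self/stat: only the pid, the command name and the
 * start of the stack are reported; all other fields read as 0.
 */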
7763 static int open_self_stat(void *cpu_env, int fd)
7764 {
7765     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7766     TaskState *ts = cpu->opaque;
7767     abi_ulong start_stack = ts->info->start_stack;
7768     int i;
7769 
7770     for (i = 0; i < 44; i++) {
7771       char buf[128];
7772       int len;
7773       uint64_t val = 0;
7774 
7775       if (i == 0) {
7776         /* pid */
7777         val = getpid();
7778         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7779       } else if (i == 1) {
7780         /* app name */
7781         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7782       } else if (i == 27) {
7783         /* stack bottom */
7784         val = start_stack;
7785         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7786       } else {
7787         /* for the rest, there is MasterCard */
7788         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7789       }
7790 
7791       len = strlen(buf);
7792       if (write(fd, buf, len) != len) {
7793           return -1;
7794       }
7795     }
7796 
7797     return 0;
7798 }
7799 
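/* Synthesize /proc/self/auxv by copying the saved auxiliary vector
 * out of the guest stack into the file.
 */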
7800 static int open_self_auxv(void *cpu_env, int fd)
7801 {
7802     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7803     TaskState *ts = cpu->opaque;
7804     abi_ulong auxv = ts->info->saved_auxv;
7805     abi_ulong len = ts->info->auxv_len;
7806     char *ptr;
7807 
7808     /*
7809      * The auxiliary vector is stored on the target process stack.
7810      * Read the whole vector and copy it to the file.
7811      */
7812     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7813     if (ptr != NULL) {
7814         while (len > 0) {
7815             ssize_t r;
7816             r = write(fd, ptr, len);
7817             if (r <= 0) {
7818                 break;
7819             }
7820             len -= r;
7821             ptr += r;
7822         }
7823         lseek(fd, 0, SEEK_SET);
7824         unlock_user(ptr, auxv, len);
7825     }
7826 
7827     return 0;
7828 }
7829 
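/* Return 1 if filename refers to the given /proc entry of the current
 * process, either via /proc/self/ or /proc/<pid>/ for our own pid.
 */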
7830 static int is_proc_myself(const char *filename, const char *entry)
7831 {
7832     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7833         filename += strlen("/proc/");
7834         if (!strncmp(filename, "self/", strlen("self/"))) {
7835             filename += strlen("self/");
7836         } else if (*filename >= '1' && *filename <= '9') {
7837             char myself[80];
7838             snprintf(myself, sizeof(myself), "%d/", getpid());
7839             if (!strncmp(filename, myself, strlen(myself))) {
7840                 filename += strlen(myself);
7841             } else {
7842                 return 0;
7843             }
7844         } else {
7845             return 0;
7846         }
7847         if (!strcmp(filename, entry)) {
7848             return 1;
7849         }
7850     }
7851     return 0;
7852 }
7853 
7854 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7855 static int is_proc(const char *filename, const char *entry)
7856 {
7857     return strcmp(filename, entry) == 0;
7858 }
7859 
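/* Rewrite /proc/net/route with the destination, gateway and mask fields
 * byte-swapped for the guest; only needed when host and target endianness
 * differ.
 */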
7860 static int open_net_route(void *cpu_env, int fd)
7861 {
7862     FILE *fp;
7863     char *line = NULL;
7864     size_t len = 0;
7865     ssize_t read;
7866 
7867     fp = fopen("/proc/net/route", "r");
7868     if (fp == NULL) {
7869         return -1;
7870     }
7871 
7872     /* read header */
7873 
7874     read = getline(&line, &len, fp);
7875     dprintf(fd, "%s", line);
7876 
7877     /* read routes */
7878 
7879     while ((read = getline(&line, &len, fp)) != -1) {
7880         char iface[16];
7881         uint32_t dest, gw, mask;
7882         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7883         sscanf(line, "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7884                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7885                      &mask, &mtu, &window, &irtt);
7886         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7887                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7888                 metric, tswap32(mask), mtu, window, irtt);
7889     }
7890 
7891     free(line);
7892     fclose(fp);
7893 
7894     return 0;
7895 }
7896 #endif
7897 
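/* openat() helper: intercept /proc/self/exe and the few /proc files that
 * must be emulated (served from a temporary file filled by the matching
 * fill function); everything else goes straight to the host openat().
 */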
7898 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7899 {
7900     struct fake_open {
7901         const char *filename;
7902         int (*fill)(void *cpu_env, int fd);
7903         int (*cmp)(const char *s1, const char *s2);
7904     };
7905     const struct fake_open *fake_open;
7906     static const struct fake_open fakes[] = {
7907         { "maps", open_self_maps, is_proc_myself },
7908         { "stat", open_self_stat, is_proc_myself },
7909         { "auxv", open_self_auxv, is_proc_myself },
7910         { "cmdline", open_self_cmdline, is_proc_myself },
7911 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7912         { "/proc/net/route", open_net_route, is_proc },
7913 #endif
7914         { NULL, NULL, NULL }
7915     };
7916 
7917     if (is_proc_myself(pathname, "exe")) {
7918         int execfd = qemu_getauxval(AT_EXECFD);
7919         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7920     }
7921 
7922     for (fake_open = fakes; fake_open->filename; fake_open++) {
7923         if (fake_open->cmp(pathname, fake_open->filename)) {
7924             break;
7925         }
7926     }
7927 
7928     if (fake_open->filename) {
7929         const char *tmpdir;
7930         char filename[PATH_MAX];
7931         int fd, r;
7932 
7933         /* create a temporary file to hold the synthesized contents */
7934         tmpdir = getenv("TMPDIR");
7935         if (!tmpdir)
7936             tmpdir = "/tmp";
7937         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7938         fd = mkstemp(filename);
7939         if (fd < 0) {
7940             return fd;
7941         }
7942         unlink(filename);
7943 
7944         if ((r = fake_open->fill(cpu_env, fd))) {
7945             int e = errno;
7946             close(fd);
7947             errno = e;
7948             return r;
7949         }
7950         lseek(fd, 0, SEEK_SET);
7951 
7952         return fd;
7953     }
7954 
7955     return safe_openat(dirfd, path(pathname), flags, mode);
7956 }
7957 
7958 #define TIMER_MAGIC 0x0caf0000
7959 #define TIMER_MAGIC_MASK 0xffff0000
7960 
7961 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7962 static target_timer_t get_timer_id(abi_long arg)
7963 {
7964     target_timer_t timerid = arg;
7965 
7966     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7967         return -TARGET_EINVAL;
7968     }
7969 
7970     timerid &= 0xffff;
7971 
7972     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7973         return -TARGET_EINVAL;
7974     }
7975 
7976     return timerid;
7977 }
7978 
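/* eventfd transfers 64-bit counters; byteswap each counter between host
 * and target byte order (used for both directions).
 */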
7979 static abi_long swap_data_eventfd(void *buf, size_t len)
7980 {
7981     uint64_t *counter = buf;
7982     int i;
7983 
7984     if (len < sizeof(uint64_t)) {
7985         return -EINVAL;
7986     }
7987 
7988     for (i = 0; i < len; i += sizeof(uint64_t)) {
7989         *counter = tswap64(*counter);
7990         counter++;
7991     }
7992 
7993     return len;
7994 }
7995 
7996 static TargetFdTrans target_eventfd_trans = {
7997     .host_to_target_data = swap_data_eventfd,
7998     .target_to_host_data = swap_data_eventfd,
7999 };
8000 
8001 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
8002     (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
8003      defined(__NR_inotify_init1))
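/* Byteswap the fixed-size fields of each inotify_event in the buffer;
 * the trailing name bytes need no conversion.
 */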
8004 static abi_long host_to_target_data_inotify(void *buf, size_t len)
8005 {
8006     struct inotify_event *ev;
8007     int i;
8008     uint32_t name_len;
8009 
8010     for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
8011         ev = (struct inotify_event *)((char *)buf + i);
8012         name_len = ev->len;
8013 
8014         ev->wd = tswap32(ev->wd);
8015         ev->mask = tswap32(ev->mask);
8016         ev->cookie = tswap32(ev->cookie);
8017         ev->len = tswap32(name_len);
8018     }
8019 
8020     return len;
8021 }
8022 
8023 static TargetFdTrans target_inotify_trans = {
8024     .host_to_target_data = host_to_target_data_inotify,
8025 };
8026 #endif
8027 
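/* Copy a guest CPU affinity mask (an array of abi_ulong) into a host
 * mask, bit by bit, coping with differences in word size between guest
 * and host.
 */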
8028 static int target_to_host_cpu_mask(unsigned long *host_mask,
8029                                    size_t host_size,
8030                                    abi_ulong target_addr,
8031                                    size_t target_size)
8032 {
8033     unsigned target_bits = sizeof(abi_ulong) * 8;
8034     unsigned host_bits = sizeof(*host_mask) * 8;
8035     abi_ulong *target_mask;
8036     unsigned i, j;
8037 
8038     assert(host_size >= target_size);
8039 
8040     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8041     if (!target_mask) {
8042         return -TARGET_EFAULT;
8043     }
8044     memset(host_mask, 0, host_size);
8045 
8046     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8047         unsigned bit = i * target_bits;
8048         abi_ulong val;
8049 
8050         __get_user(val, &target_mask[i]);
8051         for (j = 0; j < target_bits; j++, bit++) {
8052             if (val & (1UL << j)) {
8053                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8054             }
8055         }
8056     }
8057 
8058     unlock_user(target_mask, target_addr, 0);
8059     return 0;
8060 }
8061 
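/* The reverse of target_to_host_cpu_mask(): copy a host CPU affinity
 * mask back into the guest's abi_ulong array.
 */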
8062 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8063                                    size_t host_size,
8064                                    abi_ulong target_addr,
8065                                    size_t target_size)
8066 {
8067     unsigned target_bits = sizeof(abi_ulong) * 8;
8068     unsigned host_bits = sizeof(*host_mask) * 8;
8069     abi_ulong *target_mask;
8070     unsigned i, j;
8071 
8072     assert(host_size >= target_size);
8073 
8074     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8075     if (!target_mask) {
8076         return -TARGET_EFAULT;
8077     }
8078 
8079     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8080         unsigned bit = i * target_bits;
8081         abi_ulong val = 0;
8082 
8083         for (j = 0; j < target_bits; j++, bit++) {
8084             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8085                 val |= 1UL << j;
8086             }
8087         }
8088         __put_user(val, &target_mask[i]);
8089     }
8090 
8091     unlock_user(target_mask, target_addr, target_size);
8092     return 0;
8093 }
8094 
8095 /* This is an internal helper for do_syscall() that gives it a single
8096  * return point, so that actions such as logging of syscall results
8097  * can be performed.
8098  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8099  */
8100 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8101                             abi_long arg2, abi_long arg3, abi_long arg4,
8102                             abi_long arg5, abi_long arg6, abi_long arg7,
8103                             abi_long arg8)
8104 {
8105     CPUState *cpu = ENV_GET_CPU(cpu_env);
8106     abi_long ret;
8107 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8108     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8109     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
8110     struct stat st;
8111 #endif
8112 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8113     || defined(TARGET_NR_fstatfs)
8114     struct statfs stfs;
8115 #endif
8116     void *p;
8117 
8118     switch(num) {
8119     case TARGET_NR_exit:
8120         /* In old applications this may be used to implement _exit(2).
8121            However, in threaded applications it is used for thread termination,
8122            and _exit_group is used for application termination.
8123            Do thread termination if we have more than one thread.  */
8124 
8125         if (block_signals()) {
8126             return -TARGET_ERESTARTSYS;
8127         }
8128 
8129         cpu_list_lock();
8130 
8131         if (CPU_NEXT(first_cpu)) {
8132             TaskState *ts;
8133 
8134             /* Remove the CPU from the list.  */
8135             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
8136 
8137             cpu_list_unlock();
8138 
8139             ts = cpu->opaque;
8140             if (ts->child_tidptr) {
8141                 put_user_u32(0, ts->child_tidptr);
8142                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8143                           NULL, NULL, 0);
8144             }
8145             thread_cpu = NULL;
8146             object_unref(OBJECT(cpu));
8147             g_free(ts);
8148             rcu_unregister_thread();
8149             pthread_exit(NULL);
8150         }
8151 
8152         cpu_list_unlock();
8153         preexit_cleanup(cpu_env, arg1);
8154         _exit(arg1);
8155         return 0; /* avoid warning */
8156     case TARGET_NR_read:
8157         if (arg3 == 0) {
8158             return 0;
8159         } else {
8160             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8161                 return -TARGET_EFAULT;
8162             ret = get_errno(safe_read(arg1, p, arg3));
8163             if (ret >= 0 &&
8164                 fd_trans_host_to_target_data(arg1)) {
8165                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8166             }
8167             unlock_user(p, arg2, ret);
8168         }
8169         return ret;
8170     case TARGET_NR_write:
8171         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8172             return -TARGET_EFAULT;
8173         if (fd_trans_target_to_host_data(arg1)) {
8174             void *copy = g_malloc(arg3);
8175             memcpy(copy, p, arg3);
8176             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8177             if (ret >= 0) {
8178                 ret = get_errno(safe_write(arg1, copy, ret));
8179             }
8180             g_free(copy);
8181         } else {
8182             ret = get_errno(safe_write(arg1, p, arg3));
8183         }
8184         unlock_user(p, arg2, 0);
8185         return ret;
8186 
8187 #ifdef TARGET_NR_open
8188     case TARGET_NR_open:
8189         if (!(p = lock_user_string(arg1)))
8190             return -TARGET_EFAULT;
8191         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8192                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8193                                   arg3));
8194         fd_trans_unregister(ret);
8195         unlock_user(p, arg1, 0);
8196         return ret;
8197 #endif
8198     case TARGET_NR_openat:
8199         if (!(p = lock_user_string(arg2)))
8200             return -TARGET_EFAULT;
8201         ret = get_errno(do_openat(cpu_env, arg1, p,
8202                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8203                                   arg4));
8204         fd_trans_unregister(ret);
8205         unlock_user(p, arg2, 0);
8206         return ret;
8207 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8208     case TARGET_NR_name_to_handle_at:
8209         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8210         return ret;
8211 #endif
8212 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8213     case TARGET_NR_open_by_handle_at:
8214         ret = do_open_by_handle_at(arg1, arg2, arg3);
8215         fd_trans_unregister(ret);
8216         return ret;
8217 #endif
8218     case TARGET_NR_close:
8219         fd_trans_unregister(arg1);
8220         return get_errno(close(arg1));
8221 
8222     case TARGET_NR_brk:
8223         return do_brk(arg1);
8224 #ifdef TARGET_NR_fork
8225     case TARGET_NR_fork:
8226         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8227 #endif
8228 #ifdef TARGET_NR_waitpid
8229     case TARGET_NR_waitpid:
8230         {
8231             int status;
8232             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8233             if (!is_error(ret) && arg2 && ret
8234                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8235                 return -TARGET_EFAULT;
8236         }
8237         return ret;
8238 #endif
8239 #ifdef TARGET_NR_waitid
8240     case TARGET_NR_waitid:
8241         {
8242             siginfo_t info;
8243             info.si_pid = 0;
8244             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8245             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8246                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8247                     return -TARGET_EFAULT;
8248                 host_to_target_siginfo(p, &info);
8249                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8250             }
8251         }
8252         return ret;
8253 #endif
8254 #ifdef TARGET_NR_creat /* not on alpha */
8255     case TARGET_NR_creat:
8256         if (!(p = lock_user_string(arg1)))
8257             return -TARGET_EFAULT;
8258         ret = get_errno(creat(p, arg2));
8259         fd_trans_unregister(ret);
8260         unlock_user(p, arg1, 0);
8261         return ret;
8262 #endif
8263 #ifdef TARGET_NR_link
8264     case TARGET_NR_link:
8265         {
8266             void * p2;
8267             p = lock_user_string(arg1);
8268             p2 = lock_user_string(arg2);
8269             if (!p || !p2)
8270                 ret = -TARGET_EFAULT;
8271             else
8272                 ret = get_errno(link(p, p2));
8273             unlock_user(p2, arg2, 0);
8274             unlock_user(p, arg1, 0);
8275         }
8276         return ret;
8277 #endif
8278 #if defined(TARGET_NR_linkat)
8279     case TARGET_NR_linkat:
8280         {
8281             void * p2 = NULL;
8282             if (!arg2 || !arg4)
8283                 return -TARGET_EFAULT;
8284             p  = lock_user_string(arg2);
8285             p2 = lock_user_string(arg4);
8286             if (!p || !p2)
8287                 ret = -TARGET_EFAULT;
8288             else
8289                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8290             unlock_user(p, arg2, 0);
8291             unlock_user(p2, arg4, 0);
8292         }
8293         return ret;
8294 #endif
8295 #ifdef TARGET_NR_unlink
8296     case TARGET_NR_unlink:
8297         if (!(p = lock_user_string(arg1)))
8298             return -TARGET_EFAULT;
8299         ret = get_errno(unlink(p));
8300         unlock_user(p, arg1, 0);
8301         return ret;
8302 #endif
8303 #if defined(TARGET_NR_unlinkat)
8304     case TARGET_NR_unlinkat:
8305         if (!(p = lock_user_string(arg2)))
8306             return -TARGET_EFAULT;
8307         ret = get_errno(unlinkat(arg1, p, arg3));
8308         unlock_user(p, arg2, 0);
8309         return ret;
8310 #endif
8311     case TARGET_NR_execve:
8312         {
8313             char **argp, **envp;
8314             int argc, envc;
8315             abi_ulong gp;
8316             abi_ulong guest_argp;
8317             abi_ulong guest_envp;
8318             abi_ulong addr;
8319             char **q;
8320             int total_size = 0;
8321 
8322             argc = 0;
8323             guest_argp = arg2;
8324             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8325                 if (get_user_ual(addr, gp))
8326                     return -TARGET_EFAULT;
8327                 if (!addr)
8328                     break;
8329                 argc++;
8330             }
8331             envc = 0;
8332             guest_envp = arg3;
8333             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8334                 if (get_user_ual(addr, gp))
8335                     return -TARGET_EFAULT;
8336                 if (!addr)
8337                     break;
8338                 envc++;
8339             }
8340 
8341             argp = g_new0(char *, argc + 1);
8342             envp = g_new0(char *, envc + 1);
8343 
8344             for (gp = guest_argp, q = argp; gp;
8345                   gp += sizeof(abi_ulong), q++) {
8346                 if (get_user_ual(addr, gp))
8347                     goto execve_efault;
8348                 if (!addr)
8349                     break;
8350                 if (!(*q = lock_user_string(addr)))
8351                     goto execve_efault;
8352                 total_size += strlen(*q) + 1;
8353             }
8354             *q = NULL;
8355 
8356             for (gp = guest_envp, q = envp; gp;
8357                   gp += sizeof(abi_ulong), q++) {
8358                 if (get_user_ual(addr, gp))
8359                     goto execve_efault;
8360                 if (!addr)
8361                     break;
8362                 if (!(*q = lock_user_string(addr)))
8363                     goto execve_efault;
8364                 total_size += strlen(*q) + 1;
8365             }
8366             *q = NULL;
8367 
8368             if (!(p = lock_user_string(arg1)))
8369                 goto execve_efault;
8370             /* Although execve() is not an interruptible syscall it is
8371              * a special case where we must use the safe_syscall wrapper:
8372              * if we allow a signal to happen before we make the host
8373              * syscall then we will 'lose' it, because at the point of
8374              * execve the process leaves QEMU's control. So we use the
8375              * safe syscall wrapper to ensure that we either take the
8376              * signal as a guest signal, or else it does not happen
8377              * before the execve completes and makes it the other
8378              * program's problem.
8379              */
8380             ret = get_errno(safe_execve(p, argp, envp));
8381             unlock_user(p, arg1, 0);
8382 
8383             goto execve_end;
8384 
8385         execve_efault:
8386             ret = -TARGET_EFAULT;
8387 
8388         execve_end:
8389             for (gp = guest_argp, q = argp; *q;
8390                   gp += sizeof(abi_ulong), q++) {
8391                 if (get_user_ual(addr, gp)
8392                     || !addr)
8393                     break;
8394                 unlock_user(*q, addr, 0);
8395             }
8396             for (gp = guest_envp, q = envp; *q;
8397                   gp += sizeof(abi_ulong), q++) {
8398                 if (get_user_ual(addr, gp)
8399                     || !addr)
8400                     break;
8401                 unlock_user(*q, addr, 0);
8402             }
8403 
8404             g_free(argp);
8405             g_free(envp);
8406         }
8407         return ret;
8408     case TARGET_NR_chdir:
8409         if (!(p = lock_user_string(arg1)))
8410             return -TARGET_EFAULT;
8411         ret = get_errno(chdir(p));
8412         unlock_user(p, arg1, 0);
8413         return ret;
8414 #ifdef TARGET_NR_time
8415     case TARGET_NR_time:
8416         {
8417             time_t host_time;
8418             ret = get_errno(time(&host_time));
8419             if (!is_error(ret)
8420                 && arg1
8421                 && put_user_sal(host_time, arg1))
8422                 return -TARGET_EFAULT;
8423         }
8424         return ret;
8425 #endif
8426 #ifdef TARGET_NR_mknod
8427     case TARGET_NR_mknod:
8428         if (!(p = lock_user_string(arg1)))
8429             return -TARGET_EFAULT;
8430         ret = get_errno(mknod(p, arg2, arg3));
8431         unlock_user(p, arg1, 0);
8432         return ret;
8433 #endif
8434 #if defined(TARGET_NR_mknodat)
8435     case TARGET_NR_mknodat:
8436         if (!(p = lock_user_string(arg2)))
8437             return -TARGET_EFAULT;
8438         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8439         unlock_user(p, arg2, 0);
8440         return ret;
8441 #endif
8442 #ifdef TARGET_NR_chmod
8443     case TARGET_NR_chmod:
8444         if (!(p = lock_user_string(arg1)))
8445             return -TARGET_EFAULT;
8446         ret = get_errno(chmod(p, arg2));
8447         unlock_user(p, arg1, 0);
8448         return ret;
8449 #endif
8450 #ifdef TARGET_NR_lseek
8451     case TARGET_NR_lseek:
8452         return get_errno(lseek(arg1, arg2, arg3));
8453 #endif
8454 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8455     /* Alpha specific */
8456     case TARGET_NR_getxpid:
8457         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8458         return get_errno(getpid());
8459 #endif
8460 #ifdef TARGET_NR_getpid
8461     case TARGET_NR_getpid:
8462         return get_errno(getpid());
8463 #endif
8464     case TARGET_NR_mount:
8465         {
8466             /* need to look at the data field */
8467             void *p2, *p3;
8468 
8469             if (arg1) {
8470                 p = lock_user_string(arg1);
8471                 if (!p) {
8472                     return -TARGET_EFAULT;
8473                 }
8474             } else {
8475                 p = NULL;
8476             }
8477 
8478             p2 = lock_user_string(arg2);
8479             if (!p2) {
8480                 if (arg1) {
8481                     unlock_user(p, arg1, 0);
8482                 }
8483                 return -TARGET_EFAULT;
8484             }
8485 
8486             if (arg3) {
8487                 p3 = lock_user_string(arg3);
8488                 if (!p3) {
8489                     if (arg1) {
8490                         unlock_user(p, arg1, 0);
8491                     }
8492                     unlock_user(p2, arg2, 0);
8493                     return -TARGET_EFAULT;
8494                 }
8495             } else {
8496                 p3 = NULL;
8497             }
8498 
8499             /* FIXME - arg5 should be locked, but it isn't clear how to
8500              * do that since it's not guaranteed to be a NULL-terminated
8501              * string.
8502              */
8503             if (!arg5) {
8504                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8505             } else {
8506                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8507             }
8508             ret = get_errno(ret);
8509 
8510             if (arg1) {
8511                 unlock_user(p, arg1, 0);
8512             }
8513             unlock_user(p2, arg2, 0);
8514             if (arg3) {
8515                 unlock_user(p3, arg3, 0);
8516             }
8517         }
8518         return ret;
8519 #ifdef TARGET_NR_umount
8520     case TARGET_NR_umount:
8521         if (!(p = lock_user_string(arg1)))
8522             return -TARGET_EFAULT;
8523         ret = get_errno(umount(p));
8524         unlock_user(p, arg1, 0);
8525         return ret;
8526 #endif
8527 #ifdef TARGET_NR_stime /* not on alpha */
8528     case TARGET_NR_stime:
8529         {
8530             time_t host_time;
8531             if (get_user_sal(host_time, arg1))
8532                 return -TARGET_EFAULT;
8533             return get_errno(stime(&host_time));
8534         }
8535 #endif
8536 #ifdef TARGET_NR_alarm /* not on alpha */
8537     case TARGET_NR_alarm:
8538         return alarm(arg1);
8539 #endif
8540 #ifdef TARGET_NR_pause /* not on alpha */
8541     case TARGET_NR_pause:
8542         if (!block_signals()) {
8543             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8544         }
8545         return -TARGET_EINTR;
8546 #endif
8547 #ifdef TARGET_NR_utime
8548     case TARGET_NR_utime:
8549         {
8550             struct utimbuf tbuf, *host_tbuf;
8551             struct target_utimbuf *target_tbuf;
8552             if (arg2) {
8553                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8554                     return -TARGET_EFAULT;
8555                 tbuf.actime = tswapal(target_tbuf->actime);
8556                 tbuf.modtime = tswapal(target_tbuf->modtime);
8557                 unlock_user_struct(target_tbuf, arg2, 0);
8558                 host_tbuf = &tbuf;
8559             } else {
8560                 host_tbuf = NULL;
8561             }
8562             if (!(p = lock_user_string(arg1)))
8563                 return -TARGET_EFAULT;
8564             ret = get_errno(utime(p, host_tbuf));
8565             unlock_user(p, arg1, 0);
8566         }
8567         return ret;
8568 #endif
8569 #ifdef TARGET_NR_utimes
8570     case TARGET_NR_utimes:
8571         {
8572             struct timeval *tvp, tv[2];
8573             if (arg2) {
8574                 if (copy_from_user_timeval(&tv[0], arg2)
8575                     || copy_from_user_timeval(&tv[1],
8576                                               arg2 + sizeof(struct target_timeval)))
8577                     return -TARGET_EFAULT;
8578                 tvp = tv;
8579             } else {
8580                 tvp = NULL;
8581             }
8582             if (!(p = lock_user_string(arg1)))
8583                 return -TARGET_EFAULT;
8584             ret = get_errno(utimes(p, tvp));
8585             unlock_user(p, arg1, 0);
8586         }
8587         return ret;
8588 #endif
8589 #if defined(TARGET_NR_futimesat)
8590     case TARGET_NR_futimesat:
8591         {
8592             struct timeval *tvp, tv[2];
8593             if (arg3) {
8594                 if (copy_from_user_timeval(&tv[0], arg3)
8595                     || copy_from_user_timeval(&tv[1],
8596                                               arg3 + sizeof(struct target_timeval)))
8597                     return -TARGET_EFAULT;
8598                 tvp = tv;
8599             } else {
8600                 tvp = NULL;
8601             }
8602             if (!(p = lock_user_string(arg2))) {
8603                 return -TARGET_EFAULT;
8604             }
8605             ret = get_errno(futimesat(arg1, path(p), tvp));
8606             unlock_user(p, arg2, 0);
8607         }
8608         return ret;
8609 #endif
8610 #ifdef TARGET_NR_access
8611     case TARGET_NR_access:
8612         if (!(p = lock_user_string(arg1))) {
8613             return -TARGET_EFAULT;
8614         }
8615         ret = get_errno(access(path(p), arg2));
8616         unlock_user(p, arg1, 0);
8617         return ret;
8618 #endif
8619 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8620     case TARGET_NR_faccessat:
8621         if (!(p = lock_user_string(arg2))) {
8622             return -TARGET_EFAULT;
8623         }
8624         ret = get_errno(faccessat(arg1, p, arg3, 0));
8625         unlock_user(p, arg2, 0);
8626         return ret;
8627 #endif
8628 #ifdef TARGET_NR_nice /* not on alpha */
8629     case TARGET_NR_nice:
8630         return get_errno(nice(arg1));
8631 #endif
8632     case TARGET_NR_sync:
8633         sync();
8634         return 0;
8635 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8636     case TARGET_NR_syncfs:
8637         return get_errno(syncfs(arg1));
8638 #endif
8639     case TARGET_NR_kill:
8640         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8641 #ifdef TARGET_NR_rename
8642     case TARGET_NR_rename:
8643         {
8644             void *p2;
8645             p = lock_user_string(arg1);
8646             p2 = lock_user_string(arg2);
8647             if (!p || !p2)
8648                 ret = -TARGET_EFAULT;
8649             else
8650                 ret = get_errno(rename(p, p2));
8651             unlock_user(p2, arg2, 0);
8652             unlock_user(p, arg1, 0);
8653         }
8654         return ret;
8655 #endif
8656 #if defined(TARGET_NR_renameat)
8657     case TARGET_NR_renameat:
8658         {
8659             void *p2;
8660             p  = lock_user_string(arg2);
8661             p2 = lock_user_string(arg4);
8662             if (!p || !p2)
8663                 ret = -TARGET_EFAULT;
8664             else
8665                 ret = get_errno(renameat(arg1, p, arg3, p2));
8666             unlock_user(p2, arg4, 0);
8667             unlock_user(p, arg2, 0);
8668         }
8669         return ret;
8670 #endif
8671 #if defined(TARGET_NR_renameat2)
8672     case TARGET_NR_renameat2:
8673         {
8674             void *p2;
8675             p  = lock_user_string(arg2);
8676             p2 = lock_user_string(arg4);
8677             if (!p || !p2) {
8678                 ret = -TARGET_EFAULT;
8679             } else {
8680                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8681             }
8682             unlock_user(p2, arg4, 0);
8683             unlock_user(p, arg2, 0);
8684         }
8685         return ret;
8686 #endif
8687 #ifdef TARGET_NR_mkdir
8688     case TARGET_NR_mkdir:
8689         if (!(p = lock_user_string(arg1)))
8690             return -TARGET_EFAULT;
8691         ret = get_errno(mkdir(p, arg2));
8692         unlock_user(p, arg1, 0);
8693         return ret;
8694 #endif
8695 #if defined(TARGET_NR_mkdirat)
8696     case TARGET_NR_mkdirat:
8697         if (!(p = lock_user_string(arg2)))
8698             return -TARGET_EFAULT;
8699         ret = get_errno(mkdirat(arg1, p, arg3));
8700         unlock_user(p, arg2, 0);
8701         return ret;
8702 #endif
8703 #ifdef TARGET_NR_rmdir
8704     case TARGET_NR_rmdir:
8705         if (!(p = lock_user_string(arg1)))
8706             return -TARGET_EFAULT;
8707         ret = get_errno(rmdir(p));
8708         unlock_user(p, arg1, 0);
8709         return ret;
8710 #endif
8711     case TARGET_NR_dup:
8712         ret = get_errno(dup(arg1));
8713         if (ret >= 0) {
8714             fd_trans_dup(arg1, ret);
8715         }
8716         return ret;
8717 #ifdef TARGET_NR_pipe
8718     case TARGET_NR_pipe:
8719         return do_pipe(cpu_env, arg1, 0, 0);
8720 #endif
8721 #ifdef TARGET_NR_pipe2
8722     case TARGET_NR_pipe2:
8723         return do_pipe(cpu_env, arg1,
8724                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8725 #endif
8726     case TARGET_NR_times:
8727         {
8728             struct target_tms *tmsp;
8729             struct tms tms;
8730             ret = get_errno(times(&tms));
8731             if (arg1) {
8732                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8733                 if (!tmsp)
8734                     return -TARGET_EFAULT;
8735                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8736                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8737                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8738                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8739             }
8740             if (!is_error(ret))
8741                 ret = host_to_target_clock_t(ret);
8742         }
8743         return ret;
8744     case TARGET_NR_acct:
8745         if (arg1 == 0) {
8746             ret = get_errno(acct(NULL));
8747         } else {
8748             if (!(p = lock_user_string(arg1))) {
8749                 return -TARGET_EFAULT;
8750             }
8751             ret = get_errno(acct(path(p)));
8752             unlock_user(p, arg1, 0);
8753         }
8754         return ret;
8755 #ifdef TARGET_NR_umount2
8756     case TARGET_NR_umount2:
8757         if (!(p = lock_user_string(arg1)))
8758             return -TARGET_EFAULT;
8759         ret = get_errno(umount2(p, arg2));
8760         unlock_user(p, arg1, 0);
8761         return ret;
8762 #endif
8763     case TARGET_NR_ioctl:
8764         return do_ioctl(arg1, arg2, arg3);
8765 #ifdef TARGET_NR_fcntl
8766     case TARGET_NR_fcntl:
8767         return do_fcntl(arg1, arg2, arg3);
8768 #endif
8769     case TARGET_NR_setpgid:
8770         return get_errno(setpgid(arg1, arg2));
8771     case TARGET_NR_umask:
8772         return get_errno(umask(arg1));
8773     case TARGET_NR_chroot:
8774         if (!(p = lock_user_string(arg1)))
8775             return -TARGET_EFAULT;
8776         ret = get_errno(chroot(p));
8777         unlock_user(p, arg1, 0);
8778         return ret;
8779 #ifdef TARGET_NR_dup2
8780     case TARGET_NR_dup2:
8781         ret = get_errno(dup2(arg1, arg2));
8782         if (ret >= 0) {
8783             fd_trans_dup(arg1, arg2);
8784         }
8785         return ret;
8786 #endif
8787 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8788     case TARGET_NR_dup3:
8789     {
8790         int host_flags;
8791 
8792         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8793             return -TARGET_EINVAL;
8794         }
8795         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8796         ret = get_errno(dup3(arg1, arg2, host_flags));
8797         if (ret >= 0) {
8798             fd_trans_dup(arg1, arg2);
8799         }
8800         return ret;
8801     }
8802 #endif
8803 #ifdef TARGET_NR_getppid /* not on alpha */
8804     case TARGET_NR_getppid:
8805         return get_errno(getppid());
8806 #endif
8807 #ifdef TARGET_NR_getpgrp
8808     case TARGET_NR_getpgrp:
8809         return get_errno(getpgrp());
8810 #endif
8811     case TARGET_NR_setsid:
8812         return get_errno(setsid());
8813 #ifdef TARGET_NR_sigaction
8814     case TARGET_NR_sigaction:
8815         {
8816 #if defined(TARGET_ALPHA)
8817             struct target_sigaction act, oact, *pact = 0;
8818             struct target_old_sigaction *old_act;
8819             if (arg2) {
8820                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8821                     return -TARGET_EFAULT;
8822                 act._sa_handler = old_act->_sa_handler;
8823                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8824                 act.sa_flags = old_act->sa_flags;
8825                 act.sa_restorer = 0;
8826                 unlock_user_struct(old_act, arg2, 0);
8827                 pact = &act;
8828             }
8829             ret = get_errno(do_sigaction(arg1, pact, &oact));
8830             if (!is_error(ret) && arg3) {
8831                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8832                     return -TARGET_EFAULT;
8833                 old_act->_sa_handler = oact._sa_handler;
8834                 old_act->sa_mask = oact.sa_mask.sig[0];
8835                 old_act->sa_flags = oact.sa_flags;
8836                 unlock_user_struct(old_act, arg3, 1);
8837             }
8838 #elif defined(TARGET_MIPS)
8839             struct target_sigaction act, oact, *pact, *old_act;
8840 
8841             if (arg2) {
8842                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8843                     return -TARGET_EFAULT;
8844                 act._sa_handler = old_act->_sa_handler;
8845                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8846                 act.sa_flags = old_act->sa_flags;
8847                 unlock_user_struct(old_act, arg2, 0);
8848                 pact = &act;
8849             } else {
8850                 pact = NULL;
8851             }
8852 
8853             ret = get_errno(do_sigaction(arg1, pact, &oact));
8854 
8855             if (!is_error(ret) && arg3) {
8856                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8857                     return -TARGET_EFAULT;
8858                 old_act->_sa_handler = oact._sa_handler;
8859                 old_act->sa_flags = oact.sa_flags;
8860                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8861                 old_act->sa_mask.sig[1] = 0;
8862                 old_act->sa_mask.sig[2] = 0;
8863                 old_act->sa_mask.sig[3] = 0;
8864                 unlock_user_struct(old_act, arg3, 1);
8865             }
8866 #else
8867             struct target_old_sigaction *old_act;
8868             struct target_sigaction act, oact, *pact;
8869             if (arg2) {
8870                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8871                     return -TARGET_EFAULT;
8872                 act._sa_handler = old_act->_sa_handler;
8873                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8874                 act.sa_flags = old_act->sa_flags;
8875                 act.sa_restorer = old_act->sa_restorer;
8876 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8877                 act.ka_restorer = 0;
8878 #endif
8879                 unlock_user_struct(old_act, arg2, 0);
8880                 pact = &act;
8881             } else {
8882                 pact = NULL;
8883             }
8884             ret = get_errno(do_sigaction(arg1, pact, &oact));
8885             if (!is_error(ret) && arg3) {
8886                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8887                     return -TARGET_EFAULT;
8888                 old_act->_sa_handler = oact._sa_handler;
8889                 old_act->sa_mask = oact.sa_mask.sig[0];
8890                 old_act->sa_flags = oact.sa_flags;
8891                 old_act->sa_restorer = oact.sa_restorer;
8892                 unlock_user_struct(old_act, arg3, 1);
8893             }
8894 #endif
8895         }
8896         return ret;
8897 #endif
8898     case TARGET_NR_rt_sigaction:
8899         {
8900 #if defined(TARGET_ALPHA)
8901             /* For Alpha and SPARC this is a 5 argument syscall, with
8902              * a 'restorer' parameter which must be copied into the
8903              * sa_restorer field of the sigaction struct.
8904              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8905              * and arg5 is the sigsetsize.
8906              * Alpha also has a separate rt_sigaction struct that it uses
8907              * here; SPARC uses the usual sigaction struct.
8908              */
8909             struct target_rt_sigaction *rt_act;
8910             struct target_sigaction act, oact, *pact = 0;
8911 
8912             if (arg4 != sizeof(target_sigset_t)) {
8913                 return -TARGET_EINVAL;
8914             }
8915             if (arg2) {
8916                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8917                     return -TARGET_EFAULT;
8918                 act._sa_handler = rt_act->_sa_handler;
8919                 act.sa_mask = rt_act->sa_mask;
8920                 act.sa_flags = rt_act->sa_flags;
8921                 act.sa_restorer = arg5;
8922                 unlock_user_struct(rt_act, arg2, 0);
8923                 pact = &act;
8924             }
8925             ret = get_errno(do_sigaction(arg1, pact, &oact));
8926             if (!is_error(ret) && arg3) {
8927                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8928                     return -TARGET_EFAULT;
8929                 rt_act->_sa_handler = oact._sa_handler;
8930                 rt_act->sa_mask = oact.sa_mask;
8931                 rt_act->sa_flags = oact.sa_flags;
8932                 unlock_user_struct(rt_act, arg3, 1);
8933             }
8934 #else
8935 #ifdef TARGET_SPARC
8936             target_ulong restorer = arg4;
8937             target_ulong sigsetsize = arg5;
8938 #else
8939             target_ulong sigsetsize = arg4;
8940 #endif
8941             struct target_sigaction *act;
8942             struct target_sigaction *oact;
8943 
8944             if (sigsetsize != sizeof(target_sigset_t)) {
8945                 return -TARGET_EINVAL;
8946             }
8947             if (arg2) {
8948                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8949                     return -TARGET_EFAULT;
8950                 }
8951 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8952                 act->ka_restorer = restorer;
8953 #endif
8954             } else {
8955                 act = NULL;
8956             }
8957             if (arg3) {
8958                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8959                     ret = -TARGET_EFAULT;
8960                     goto rt_sigaction_fail;
8961                 }
8962             } else
8963                 oact = NULL;
8964             ret = get_errno(do_sigaction(arg1, act, oact));
8965         rt_sigaction_fail:
8966             if (act)
8967                 unlock_user_struct(act, arg2, 0);
8968             if (oact)
8969                 unlock_user_struct(oact, arg3, 1);
8970 #endif
8971         }
8972         return ret;
8973 #ifdef TARGET_NR_sgetmask /* not on alpha */
8974     case TARGET_NR_sgetmask:
8975         {
8976             sigset_t cur_set;
8977             abi_ulong target_set;
8978             ret = do_sigprocmask(0, NULL, &cur_set);
8979             if (!ret) {
8980                 host_to_target_old_sigset(&target_set, &cur_set);
8981                 ret = target_set;
8982             }
8983         }
8984         return ret;
8985 #endif
8986 #ifdef TARGET_NR_ssetmask /* not on alpha */
8987     case TARGET_NR_ssetmask:
8988         {
8989             sigset_t set, oset;
8990             abi_ulong target_set = arg1;
8991             target_to_host_old_sigset(&set, &target_set);
8992             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8993             if (!ret) {
8994                 host_to_target_old_sigset(&target_set, &oset);
8995                 ret = target_set;
8996             }
8997         }
8998         return ret;
8999 #endif
9000 #ifdef TARGET_NR_sigprocmask
9001     case TARGET_NR_sigprocmask:
9002         {
9003 #if defined(TARGET_ALPHA)
9004             sigset_t set, oldset;
9005             abi_ulong mask;
9006             int how;
9007 
9008             switch (arg1) {
9009             case TARGET_SIG_BLOCK:
9010                 how = SIG_BLOCK;
9011                 break;
9012             case TARGET_SIG_UNBLOCK:
9013                 how = SIG_UNBLOCK;
9014                 break;
9015             case TARGET_SIG_SETMASK:
9016                 how = SIG_SETMASK;
9017                 break;
9018             default:
9019                 return -TARGET_EINVAL;
9020             }
9021             mask = arg2;
9022             target_to_host_old_sigset(&set, &mask);
9023 
9024             ret = do_sigprocmask(how, &set, &oldset);
9025             if (!is_error(ret)) {
9026                 host_to_target_old_sigset(&mask, &oldset);
9027                 ret = mask;
9028                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9029             }
9030 #else
9031             sigset_t set, oldset, *set_ptr;
9032             int how;
9033 
9034             if (arg2) {
9035                 switch (arg1) {
9036                 case TARGET_SIG_BLOCK:
9037                     how = SIG_BLOCK;
9038                     break;
9039                 case TARGET_SIG_UNBLOCK:
9040                     how = SIG_UNBLOCK;
9041                     break;
9042                 case TARGET_SIG_SETMASK:
9043                     how = SIG_SETMASK;
9044                     break;
9045                 default:
9046                     return -TARGET_EINVAL;
9047                 }
9048                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9049                     return -TARGET_EFAULT;
9050                 target_to_host_old_sigset(&set, p);
9051                 unlock_user(p, arg2, 0);
9052                 set_ptr = &set;
9053             } else {
9054                 how = 0;
9055                 set_ptr = NULL;
9056             }
9057             ret = do_sigprocmask(how, set_ptr, &oldset);
9058             if (!is_error(ret) && arg3) {
9059                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9060                     return -TARGET_EFAULT;
9061                 host_to_target_old_sigset(p, &oldset);
9062                 unlock_user(p, arg3, sizeof(target_sigset_t));
9063             }
9064 #endif
9065         }
9066         return ret;
9067 #endif
9068     case TARGET_NR_rt_sigprocmask:
9069         {
9070             int how = arg1;
9071             sigset_t set, oldset, *set_ptr;
9072 
9073             if (arg4 != sizeof(target_sigset_t)) {
9074                 return -TARGET_EINVAL;
9075             }
9076 
9077             if (arg2) {
9078                 switch(how) {
9079                 case TARGET_SIG_BLOCK:
9080                     how = SIG_BLOCK;
9081                     break;
9082                 case TARGET_SIG_UNBLOCK:
9083                     how = SIG_UNBLOCK;
9084                     break;
9085                 case TARGET_SIG_SETMASK:
9086                     how = SIG_SETMASK;
9087                     break;
9088                 default:
9089                     return -TARGET_EINVAL;
9090                 }
9091                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9092                     return -TARGET_EFAULT;
9093                 target_to_host_sigset(&set, p);
9094                 unlock_user(p, arg2, 0);
9095                 set_ptr = &set;
9096             } else {
9097                 how = 0;
9098                 set_ptr = NULL;
9099             }
9100             ret = do_sigprocmask(how, set_ptr, &oldset);
9101             if (!is_error(ret) && arg3) {
9102                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9103                     return -TARGET_EFAULT;
9104                 host_to_target_sigset(p, &oldset);
9105                 unlock_user(p, arg3, sizeof(target_sigset_t));
9106             }
9107         }
9108         return ret;
9109 #ifdef TARGET_NR_sigpending
9110     case TARGET_NR_sigpending:
9111         {
9112             sigset_t set;
9113             ret = get_errno(sigpending(&set));
9114             if (!is_error(ret)) {
9115                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9116                     return -TARGET_EFAULT;
9117                 host_to_target_old_sigset(p, &set);
9118                 unlock_user(p, arg1, sizeof(target_sigset_t));
9119             }
9120         }
9121         return ret;
9122 #endif
9123     case TARGET_NR_rt_sigpending:
9124         {
9125             sigset_t set;
9126 
9127             /* Yes, this check is >, not != like most. We follow the kernel's
9128              * logic and it does it like this because it implements
9129              * NR_sigpending through the same code path, and in that case
9130              * the old_sigset_t is smaller in size.
9131              */
9132             if (arg2 > sizeof(target_sigset_t)) {
9133                 return -TARGET_EINVAL;
9134             }
9135 
9136             ret = get_errno(sigpending(&set));
9137             if (!is_error(ret)) {
9138                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9139                     return -TARGET_EFAULT;
9140                 host_to_target_sigset(p, &set);
9141                 unlock_user(p, arg1, sizeof(target_sigset_t));
9142             }
9143         }
9144         return ret;
9145 #ifdef TARGET_NR_sigsuspend
9146     case TARGET_NR_sigsuspend:
9147         {
9148             TaskState *ts = cpu->opaque;
9149 #if defined(TARGET_ALPHA)
9150             abi_ulong mask = arg1;
9151             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9152 #else
9153             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9154                 return -TARGET_EFAULT;
9155             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9156             unlock_user(p, arg1, 0);
9157 #endif
9158             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9159                                                SIGSET_T_SIZE));
9160             if (ret != -TARGET_ERESTARTSYS) {
9161                 ts->in_sigsuspend = 1;
9162             }
9163         }
9164         return ret;
9165 #endif
9166     case TARGET_NR_rt_sigsuspend:
9167         {
9168             TaskState *ts = cpu->opaque;
9169 
9170             if (arg2 != sizeof(target_sigset_t)) {
9171                 return -TARGET_EINVAL;
9172             }
9173             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9174                 return -TARGET_EFAULT;
9175             target_to_host_sigset(&ts->sigsuspend_mask, p);
9176             unlock_user(p, arg1, 0);
9177             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9178                                                SIGSET_T_SIZE));
9179             if (ret != -TARGET_ERESTARTSYS) {
9180                 ts->in_sigsuspend = 1;
9181             }
9182         }
9183         return ret;
9184     case TARGET_NR_rt_sigtimedwait:
9185         {
9186             sigset_t set;
9187             struct timespec uts, *puts;
9188             siginfo_t uinfo;
9189 
9190             if (arg4 != sizeof(target_sigset_t)) {
9191                 return -TARGET_EINVAL;
9192             }
9193 
9194             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9195                 return -TARGET_EFAULT;
9196             target_to_host_sigset(&set, p);
9197             unlock_user(p, arg1, 0);
9198             if (arg3) {
9199                 puts = &uts;
9200                 target_to_host_timespec(puts, arg3);
9201             } else {
9202                 puts = NULL;
9203             }
9204             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9205                                                  SIGSET_T_SIZE));
9206             if (!is_error(ret)) {
9207                 if (arg2) {
9208                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9209                                   0);
9210                     if (!p) {
9211                         return -TARGET_EFAULT;
9212                     }
9213                     host_to_target_siginfo(p, &uinfo);
9214                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9215                 }
9216                 ret = host_to_target_signal(ret);
9217             }
9218         }
9219         return ret;
9220     case TARGET_NR_rt_sigqueueinfo:
9221         {
9222             siginfo_t uinfo;
9223 
9224             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9225             if (!p) {
9226                 return -TARGET_EFAULT;
9227             }
9228             target_to_host_siginfo(&uinfo, p);
9229             unlock_user(p, arg3, 0);
9230             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9231         }
9232         return ret;
9233     case TARGET_NR_rt_tgsigqueueinfo:
9234         {
9235             siginfo_t uinfo;
9236 
9237             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9238             if (!p) {
9239                 return -TARGET_EFAULT;
9240             }
9241             target_to_host_siginfo(&uinfo, p);
9242             unlock_user(p, arg4, 0);
9243             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9244         }
9245         return ret;
9246 #ifdef TARGET_NR_sigreturn
9247     case TARGET_NR_sigreturn:
9248         if (block_signals()) {
9249             return -TARGET_ERESTARTSYS;
9250         }
9251         return do_sigreturn(cpu_env);
9252 #endif
9253     case TARGET_NR_rt_sigreturn:
9254         if (block_signals()) {
9255             return -TARGET_ERESTARTSYS;
9256         }
9257         return do_rt_sigreturn(cpu_env);
9258     case TARGET_NR_sethostname:
9259         if (!(p = lock_user_string(arg1)))
9260             return -TARGET_EFAULT;
9261         ret = get_errno(sethostname(p, arg2));
9262         unlock_user(p, arg1, 0);
9263         return ret;
9264 #ifdef TARGET_NR_setrlimit
9265     case TARGET_NR_setrlimit:
9266         {
9267             int resource = target_to_host_resource(arg1);
9268             struct target_rlimit *target_rlim;
9269             struct rlimit rlim;
9270             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9271                 return -TARGET_EFAULT;
9272             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9273             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9274             unlock_user_struct(target_rlim, arg2, 0);
9275             return get_errno(setrlimit(resource, &rlim));
9276         }
9277 #endif
9278 #ifdef TARGET_NR_getrlimit
9279     case TARGET_NR_getrlimit:
9280         {
9281             int resource = target_to_host_resource(arg1);
9282             struct target_rlimit *target_rlim;
9283             struct rlimit rlim;
9284 
9285             ret = get_errno(getrlimit(resource, &rlim));
9286             if (!is_error(ret)) {
9287                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9288                     return -TARGET_EFAULT;
9289                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9290                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9291                 unlock_user_struct(target_rlim, arg2, 1);
9292             }
9293         }
9294         return ret;
9295 #endif
9296     case TARGET_NR_getrusage:
9297         {
9298             struct rusage rusage;
9299             ret = get_errno(getrusage(arg1, &rusage));
9300             if (!is_error(ret)) {
9301                 ret = host_to_target_rusage(arg2, &rusage);
9302             }
9303         }
9304         return ret;
9305     case TARGET_NR_gettimeofday:
9306         {
9307             struct timeval tv;
9308             ret = get_errno(gettimeofday(&tv, NULL));
9309             if (!is_error(ret)) {
9310                 if (copy_to_user_timeval(arg1, &tv))
9311                     return -TARGET_EFAULT;
9312             }
9313         }
9314         return ret;
9315     case TARGET_NR_settimeofday:
9316         {
9317             struct timeval tv, *ptv = NULL;
9318             struct timezone tz, *ptz = NULL;
9319 
9320             if (arg1) {
9321                 if (copy_from_user_timeval(&tv, arg1)) {
9322                     return -TARGET_EFAULT;
9323                 }
9324                 ptv = &tv;
9325             }
9326 
9327             if (arg2) {
9328                 if (copy_from_user_timezone(&tz, arg2)) {
9329                     return -TARGET_EFAULT;
9330                 }
9331                 ptz = &tz;
9332             }
9333 
9334             return get_errno(settimeofday(ptv, ptz));
9335         }
9336 #if defined(TARGET_NR_select)
9337     case TARGET_NR_select:
9338 #if defined(TARGET_WANT_NI_OLD_SELECT)
9339         /* Some architectures used to implement old_select here,
9340          * but they now return ENOSYS for it.
9341          */
9342         ret = -TARGET_ENOSYS;
9343 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9344         ret = do_old_select(arg1);
9345 #else
9346         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9347 #endif
9348         return ret;
9349 #endif
9350 #ifdef TARGET_NR_pselect6
9351     case TARGET_NR_pselect6:
9352         {
9353             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9354             fd_set rfds, wfds, efds;
9355             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9356             struct timespec ts, *ts_ptr;
9357 
9358             /*
9359              * The 6th arg is actually two args smashed together,
9360              * so we cannot use the C library's pselect().
9361              */
9362             sigset_t set;
9363             struct {
9364                 sigset_t *set;
9365                 size_t size;
9366             } sig, *sig_ptr;
9367 
9368             abi_ulong arg_sigset, arg_sigsize, *arg7;
9369             target_sigset_t *target_sigset;
9370 
9371             n = arg1;
9372             rfd_addr = arg2;
9373             wfd_addr = arg3;
9374             efd_addr = arg4;
9375             ts_addr = arg5;
9376 
9377             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9378             if (ret) {
9379                 return ret;
9380             }
9381             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9382             if (ret) {
9383                 return ret;
9384             }
9385             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9386             if (ret) {
9387                 return ret;
9388             }
9389 
9390             /*
9391              * This takes a timespec, and not a timeval, so we cannot
9392              * use the do_select() helper ...
9393              */
9394             if (ts_addr) {
9395                 if (target_to_host_timespec(&ts, ts_addr)) {
9396                     return -TARGET_EFAULT;
9397                 }
9398                 ts_ptr = &ts;
9399             } else {
9400                 ts_ptr = NULL;
9401             }
9402 
9403             /* Extract the two packed args for the sigset */
9404             if (arg6) {
9405                 sig_ptr = &sig;
9406                 sig.size = SIGSET_T_SIZE;
9407 
9408                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9409                 if (!arg7) {
9410                     return -TARGET_EFAULT;
9411                 }
9412                 arg_sigset = tswapal(arg7[0]);
9413                 arg_sigsize = tswapal(arg7[1]);
9414                 unlock_user(arg7, arg6, 0);
9415 
9416                 if (arg_sigset) {
9417                     sig.set = &set;
9418                     if (arg_sigsize != sizeof(*target_sigset)) {
9419                         /* Like the kernel, we enforce correct size sigsets */
9420                         return -TARGET_EINVAL;
9421                     }
9422                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9423                                               sizeof(*target_sigset), 1);
9424                     if (!target_sigset) {
9425                         return -TARGET_EFAULT;
9426                     }
9427                     target_to_host_sigset(&set, target_sigset);
9428                     unlock_user(target_sigset, arg_sigset, 0);
9429                 } else {
9430                     sig.set = NULL;
9431                 }
9432             } else {
9433                 sig_ptr = NULL;
9434             }
9435 
9436             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9437                                           ts_ptr, sig_ptr));
9438 
9439             if (!is_error(ret)) {
9440                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9441                     return -TARGET_EFAULT;
9442                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9443                     return -TARGET_EFAULT;
9444                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9445                     return -TARGET_EFAULT;
9446 
9447                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9448                     return -TARGET_EFAULT;
9449             }
9450         }
9451         return ret;
9452 #endif
9453 #ifdef TARGET_NR_symlink
9454     case TARGET_NR_symlink:
9455         {
9456             void *p2;
9457             p = lock_user_string(arg1);
9458             p2 = lock_user_string(arg2);
9459             if (!p || !p2)
9460                 ret = -TARGET_EFAULT;
9461             else
9462                 ret = get_errno(symlink(p, p2));
9463             unlock_user(p2, arg2, 0);
9464             unlock_user(p, arg1, 0);
9465         }
9466         return ret;
9467 #endif
9468 #if defined(TARGET_NR_symlinkat)
9469     case TARGET_NR_symlinkat:
9470         {
9471             void *p2;
9472             p  = lock_user_string(arg1);
9473             p2 = lock_user_string(arg3);
9474             if (!p || !p2)
9475                 ret = -TARGET_EFAULT;
9476             else
9477                 ret = get_errno(symlinkat(p, arg2, p2));
9478             unlock_user(p2, arg3, 0);
9479             unlock_user(p, arg1, 0);
9480         }
9481         return ret;
9482 #endif
9483 #ifdef TARGET_NR_readlink
9484     case TARGET_NR_readlink:
9485         {
9486             void *p2;
9487             p = lock_user_string(arg1);
9488             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9489             if (!p || !p2) {
9490                 ret = -TARGET_EFAULT;
9491             } else if (!arg3) {
9492                 /* Short circuit this for the magic exe check. */
9493                 ret = -TARGET_EINVAL;
9494             } else if (is_proc_myself((const char *)p, "exe")) {
9495                 char real[PATH_MAX], *temp;
9496                 temp = realpath(exec_path, real);
9497                 /* Return value is # of bytes that we wrote to the buffer. */
9498                 if (temp == NULL) {
9499                     ret = get_errno(-1);
9500                 } else {
9501                     /* Don't worry about sign mismatch as earlier mapping
9502                      * logic would have thrown a bad address error. */
9503                     ret = MIN(strlen(real), arg3);
9504                     /* We cannot NUL terminate the string. */
9505                     memcpy(p2, real, ret);
9506                 }
9507             } else {
9508                 ret = get_errno(readlink(path(p), p2, arg3));
9509             }
9510             unlock_user(p2, arg2, ret);
9511             unlock_user(p, arg1, 0);
9512         }
9513         return ret;
9514 #endif
9515 #if defined(TARGET_NR_readlinkat)
9516     case TARGET_NR_readlinkat:
9517         {
9518             void *p2;
9519             p  = lock_user_string(arg2);
9520             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9521             if (!p || !p2) {
9522                 ret = -TARGET_EFAULT;
9523             } else if (is_proc_myself((const char *)p, "exe")) {
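                     /* Magic /proc/self/exe case, as with readlink above:
                      * report the emulated binary's path. */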
9524                 char real[PATH_MAX], *temp;
9525                 temp = realpath(exec_path, real);
9526             ret = temp == NULL ? get_errno(-1) : strlen(real);
9527                 snprintf((char *)p2, arg4, "%s", real);
9528             } else {
9529                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9530             }
9531             unlock_user(p2, arg3, ret);
9532             unlock_user(p, arg2, 0);
9533         }
9534         return ret;
9535 #endif
9536 #ifdef TARGET_NR_swapon
9537     case TARGET_NR_swapon:
9538         if (!(p = lock_user_string(arg1)))
9539             return -TARGET_EFAULT;
9540         ret = get_errno(swapon(p, arg2));
9541         unlock_user(p, arg1, 0);
9542         return ret;
9543 #endif
9544     case TARGET_NR_reboot:
9545         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9546            /* arg4 is only valid for RESTART2; it must be ignored otherwise */
9547            p = lock_user_string(arg4);
9548            if (!p) {
9549                return -TARGET_EFAULT;
9550            }
9551            ret = get_errno(reboot(arg1, arg2, arg3, p));
9552            unlock_user(p, arg4, 0);
9553         } else {
9554            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9555         }
9556         return ret;
9557 #ifdef TARGET_NR_mmap
9558     case TARGET_NR_mmap:
9559 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9560     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9561     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9562     || defined(TARGET_S390X)
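             /* These targets use the old mmap calling convention: arg1 points
              * to a block of six arguments in guest memory, so fetch and
              * byte-swap them before calling target_mmap().
              */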
9563         {
9564             abi_ulong *v;
9565             abi_ulong v1, v2, v3, v4, v5, v6;
9566             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9567                 return -TARGET_EFAULT;
9568             v1 = tswapal(v[0]);
9569             v2 = tswapal(v[1]);
9570             v3 = tswapal(v[2]);
9571             v4 = tswapal(v[3]);
9572             v5 = tswapal(v[4]);
9573             v6 = tswapal(v[5]);
9574             unlock_user(v, arg1, 0);
9575             ret = get_errno(target_mmap(v1, v2, v3,
9576                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9577                                         v5, v6));
9578         }
9579 #else
9580         ret = get_errno(target_mmap(arg1, arg2, arg3,
9581                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9582                                     arg5,
9583                                     arg6));
9584 #endif
9585         return ret;
9586 #endif
9587 #ifdef TARGET_NR_mmap2
9588     case TARGET_NR_mmap2:
9589 #ifndef MMAP_SHIFT
9590 #define MMAP_SHIFT 12
9591 #endif
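             /* mmap2 passes the file offset in units of 2^MMAP_SHIFT bytes
              * (4096 unless the target overrides it), so scale it back up
              * to a byte offset for target_mmap().
              */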
9592         ret = target_mmap(arg1, arg2, arg3,
9593                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9594                           arg5, arg6 << MMAP_SHIFT);
9595         return get_errno(ret);
9596 #endif
9597     case TARGET_NR_munmap:
9598         return get_errno(target_munmap(arg1, arg2));
9599     case TARGET_NR_mprotect:
9600         {
9601             TaskState *ts = cpu->opaque;
9602             /* Special hack to detect libc making the stack executable.  */
9603             if ((arg3 & PROT_GROWSDOWN)
9604                 && arg1 >= ts->info->stack_limit
9605                 && arg1 <= ts->info->start_stack) {
9606                 arg3 &= ~PROT_GROWSDOWN;
9607                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9608                 arg1 = ts->info->stack_limit;
9609             }
9610         }
9611         return get_errno(target_mprotect(arg1, arg2, arg3));
9612 #ifdef TARGET_NR_mremap
9613     case TARGET_NR_mremap:
9614         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9615 #endif
9616         /* ??? msync/mlock/munlock are broken for softmmu.  */
9617 #ifdef TARGET_NR_msync
9618     case TARGET_NR_msync:
9619         return get_errno(msync(g2h(arg1), arg2, arg3));
9620 #endif
9621 #ifdef TARGET_NR_mlock
9622     case TARGET_NR_mlock:
9623         return get_errno(mlock(g2h(arg1), arg2));
9624 #endif
9625 #ifdef TARGET_NR_munlock
9626     case TARGET_NR_munlock:
9627         return get_errno(munlock(g2h(arg1), arg2));
9628 #endif
9629 #ifdef TARGET_NR_mlockall
9630     case TARGET_NR_mlockall:
9631         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9632 #endif
9633 #ifdef TARGET_NR_munlockall
9634     case TARGET_NR_munlockall:
9635         return get_errno(munlockall());
9636 #endif
9637 #ifdef TARGET_NR_truncate
9638     case TARGET_NR_truncate:
9639         if (!(p = lock_user_string(arg1)))
9640             return -TARGET_EFAULT;
9641         ret = get_errno(truncate(p, arg2));
9642         unlock_user(p, arg1, 0);
9643         return ret;
9644 #endif
9645 #ifdef TARGET_NR_ftruncate
9646     case TARGET_NR_ftruncate:
9647         return get_errno(ftruncate(arg1, arg2));
9648 #endif
9649     case TARGET_NR_fchmod:
9650         return get_errno(fchmod(arg1, arg2));
9651 #if defined(TARGET_NR_fchmodat)
9652     case TARGET_NR_fchmodat:
9653         if (!(p = lock_user_string(arg2)))
9654             return -TARGET_EFAULT;
9655         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9656         unlock_user(p, arg2, 0);
9657         return ret;
9658 #endif
9659     case TARGET_NR_getpriority:
9660         /* Note that negative values are valid for getpriority, so we must
9661            differentiate based on errno settings.  */
9662         errno = 0;
9663         ret = getpriority(arg1, arg2);
9664         if (ret == -1 && errno != 0) {
9665             return -host_to_target_errno(errno);
9666         }
9667 #ifdef TARGET_ALPHA
9668         /* Return value is the unbiased priority.  Signal no error.  */
9669         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9670 #else
9671         /* Return value is a biased priority to avoid negative numbers.  */
9672         ret = 20 - ret;
9673 #endif
9674         return ret;
9675     case TARGET_NR_setpriority:
9676         return get_errno(setpriority(arg1, arg2, arg3));
9677 #ifdef TARGET_NR_statfs
9678     case TARGET_NR_statfs:
9679         if (!(p = lock_user_string(arg1))) {
9680             return -TARGET_EFAULT;
9681         }
9682         ret = get_errno(statfs(path(p), &stfs));
9683         unlock_user(p, arg1, 0);
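             /* The fstatfs case below jumps here to share this conversion. */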
9684     convert_statfs:
9685         if (!is_error(ret)) {
9686             struct target_statfs *target_stfs;
9687 
9688             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9689                 return -TARGET_EFAULT;
9690             __put_user(stfs.f_type, &target_stfs->f_type);
9691             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9692             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9693             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9694             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9695             __put_user(stfs.f_files, &target_stfs->f_files);
9696             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9697             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9698             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9699             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9700             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9701 #ifdef _STATFS_F_FLAGS
9702             __put_user(stfs.f_flags, &target_stfs->f_flags);
9703 #else
9704             __put_user(0, &target_stfs->f_flags);
9705 #endif
9706             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9707             unlock_user_struct(target_stfs, arg2, 1);
9708         }
9709         return ret;
9710 #endif
9711 #ifdef TARGET_NR_fstatfs
9712     case TARGET_NR_fstatfs:
9713         ret = get_errno(fstatfs(arg1, &stfs));
9714         goto convert_statfs;
9715 #endif
9716 #ifdef TARGET_NR_statfs64
9717     case TARGET_NR_statfs64:
9718         if (!(p = lock_user_string(arg1))) {
9719             return -TARGET_EFAULT;
9720         }
9721         ret = get_errno(statfs(path(p), &stfs));
9722         unlock_user(p, arg1, 0);
9723     convert_statfs64:
9724         if (!is_error(ret)) {
9725             struct target_statfs64 *target_stfs;
9726 
9727             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9728                 return -TARGET_EFAULT;
9729             __put_user(stfs.f_type, &target_stfs->f_type);
9730             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9731             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9732             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9733             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9734             __put_user(stfs.f_files, &target_stfs->f_files);
9735             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9736             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9737             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9738             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9739             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9740             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9741             unlock_user_struct(target_stfs, arg3, 1);
9742         }
9743         return ret;
9744     case TARGET_NR_fstatfs64:
9745         ret = get_errno(fstatfs(arg1, &stfs));
9746         goto convert_statfs64;
9747 #endif
9748 #ifdef TARGET_NR_socketcall
9749     case TARGET_NR_socketcall:
9750         return do_socketcall(arg1, arg2);
9751 #endif
9752 #ifdef TARGET_NR_accept
9753     case TARGET_NR_accept:
9754         return do_accept4(arg1, arg2, arg3, 0);
9755 #endif
9756 #ifdef TARGET_NR_accept4
9757     case TARGET_NR_accept4:
9758         return do_accept4(arg1, arg2, arg3, arg4);
9759 #endif
9760 #ifdef TARGET_NR_bind
9761     case TARGET_NR_bind:
9762         return do_bind(arg1, arg2, arg3);
9763 #endif
9764 #ifdef TARGET_NR_connect
9765     case TARGET_NR_connect:
9766         return do_connect(arg1, arg2, arg3);
9767 #endif
9768 #ifdef TARGET_NR_getpeername
9769     case TARGET_NR_getpeername:
9770         return do_getpeername(arg1, arg2, arg3);
9771 #endif
9772 #ifdef TARGET_NR_getsockname
9773     case TARGET_NR_getsockname:
9774         return do_getsockname(arg1, arg2, arg3);
9775 #endif
9776 #ifdef TARGET_NR_getsockopt
9777     case TARGET_NR_getsockopt:
9778         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9779 #endif
9780 #ifdef TARGET_NR_listen
9781     case TARGET_NR_listen:
9782         return get_errno(listen(arg1, arg2));
9783 #endif
9784 #ifdef TARGET_NR_recv
9785     case TARGET_NR_recv:
9786         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9787 #endif
9788 #ifdef TARGET_NR_recvfrom
9789     case TARGET_NR_recvfrom:
9790         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9791 #endif
9792 #ifdef TARGET_NR_recvmsg
9793     case TARGET_NR_recvmsg:
9794         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9795 #endif
9796 #ifdef TARGET_NR_send
9797     case TARGET_NR_send:
9798         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9799 #endif
9800 #ifdef TARGET_NR_sendmsg
9801     case TARGET_NR_sendmsg:
9802         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9803 #endif
9804 #ifdef TARGET_NR_sendmmsg
9805     case TARGET_NR_sendmmsg:
9806         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9807     case TARGET_NR_recvmmsg:
9808         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9809 #endif
9810 #ifdef TARGET_NR_sendto
9811     case TARGET_NR_sendto:
9812         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9813 #endif
9814 #ifdef TARGET_NR_shutdown
9815     case TARGET_NR_shutdown:
9816         return get_errno(shutdown(arg1, arg2));
9817 #endif
9818 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9819     case TARGET_NR_getrandom:
9820         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9821         if (!p) {
9822             return -TARGET_EFAULT;
9823         }
9824         ret = get_errno(getrandom(p, arg2, arg3));
9825         unlock_user(p, arg1, ret);
9826         return ret;
9827 #endif
9828 #ifdef TARGET_NR_socket
9829     case TARGET_NR_socket:
9830         return do_socket(arg1, arg2, arg3);
9831 #endif
9832 #ifdef TARGET_NR_socketpair
9833     case TARGET_NR_socketpair:
9834         return do_socketpair(arg1, arg2, arg3, arg4);
9835 #endif
9836 #ifdef TARGET_NR_setsockopt
9837     case TARGET_NR_setsockopt:
9838         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9839 #endif
9840 #if defined(TARGET_NR_syslog)
9841     case TARGET_NR_syslog:
9842         {
9843             int len = arg2;
9844 
9845             switch (arg1) {
9846             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9847             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9848             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9849             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9850             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9851             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9852             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9853             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9854                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9855             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9856             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9857             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9858                 {
9859                     if (len < 0) {
9860                         return -TARGET_EINVAL;
9861                     }
9862                     if (len == 0) {
9863                         return 0;
9864                     }
9865                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9866                     if (!p) {
9867                         return -TARGET_EFAULT;
9868                     }
9869                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9870                     unlock_user(p, arg2, arg3);
9871                 }
9872                 return ret;
9873             default:
9874                 return -TARGET_EINVAL;
9875             }
9876         }
9877         break;
9878 #endif
9879     case TARGET_NR_setitimer:
9880         {
9881             struct itimerval value, ovalue, *pvalue;
9882 
9883             if (arg2) {
9884                 pvalue = &value;
9885                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9886                     || copy_from_user_timeval(&pvalue->it_value,
9887                                               arg2 + sizeof(struct target_timeval)))
9888                     return -TARGET_EFAULT;
9889             } else {
9890                 pvalue = NULL;
9891             }
9892             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9893             if (!is_error(ret) && arg3) {
9894                 if (copy_to_user_timeval(arg3,
9895                                          &ovalue.it_interval)
9896                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9897                                             &ovalue.it_value))
9898                     return -TARGET_EFAULT;
9899             }
9900         }
9901         return ret;
9902     case TARGET_NR_getitimer:
9903         {
9904             struct itimerval value;
9905 
9906             ret = get_errno(getitimer(arg1, &value));
9907             if (!is_error(ret) && arg2) {
9908                 if (copy_to_user_timeval(arg2,
9909                                          &value.it_interval)
9910                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9911                                             &value.it_value))
9912                     return -TARGET_EFAULT;
9913             }
9914         }
9915         return ret;
9916 #ifdef TARGET_NR_stat
9917     case TARGET_NR_stat:
9918         if (!(p = lock_user_string(arg1))) {
9919             return -TARGET_EFAULT;
9920         }
9921         ret = get_errno(stat(path(p), &st));
9922         unlock_user(p, arg1, 0);
9923         goto do_stat;
9924 #endif
9925 #ifdef TARGET_NR_lstat
9926     case TARGET_NR_lstat:
9927         if (!(p = lock_user_string(arg1))) {
9928             return -TARGET_EFAULT;
9929         }
9930         ret = get_errno(lstat(path(p), &st));
9931         unlock_user(p, arg1, 0);
9932         goto do_stat;
9933 #endif
9934 #ifdef TARGET_NR_fstat
9935     case TARGET_NR_fstat:
9936         {
9937             ret = get_errno(fstat(arg1, &st));
9938 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9939         do_stat:
9940 #endif
9941             if (!is_error(ret)) {
9942                 struct target_stat *target_st;
9943 
9944                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9945                     return -TARGET_EFAULT;
9946                 memset(target_st, 0, sizeof(*target_st));
9947                 __put_user(st.st_dev, &target_st->st_dev);
9948                 __put_user(st.st_ino, &target_st->st_ino);
9949                 __put_user(st.st_mode, &target_st->st_mode);
9950                 __put_user(st.st_uid, &target_st->st_uid);
9951                 __put_user(st.st_gid, &target_st->st_gid);
9952                 __put_user(st.st_nlink, &target_st->st_nlink);
9953                 __put_user(st.st_rdev, &target_st->st_rdev);
9954                 __put_user(st.st_size, &target_st->st_size);
9955                 __put_user(st.st_blksize, &target_st->st_blksize);
9956                 __put_user(st.st_blocks, &target_st->st_blocks);
9957                 __put_user(st.st_atime, &target_st->target_st_atime);
9958                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9959                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9960                 unlock_user_struct(target_st, arg2, 1);
9961             }
9962         }
9963         return ret;
9964 #endif
9965     case TARGET_NR_vhangup:
9966         return get_errno(vhangup());
9967 #ifdef TARGET_NR_syscall
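         /* Indirect syscall: arg1 (masked to 16 bits) selects the real
          * syscall number and the remaining arguments shift down one slot.
          */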
9968     case TARGET_NR_syscall:
9969         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9970                           arg6, arg7, arg8, 0);
9971 #endif
9972     case TARGET_NR_wait4:
9973         {
9974             int status;
9975             abi_long status_ptr = arg2;
9976             struct rusage rusage, *rusage_ptr;
9977             abi_ulong target_rusage = arg4;
9978             abi_long rusage_err;
9979             if (target_rusage)
9980                 rusage_ptr = &rusage;
9981             else
9982                 rusage_ptr = NULL;
9983             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9984             if (!is_error(ret)) {
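                     /* Only report a status when a child was actually reaped;
                      * a zero return (e.g. WNOHANG with nothing pending) means
                      * there is no status to report.
                      */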
9985                 if (status_ptr && ret) {
9986                     status = host_to_target_waitstatus(status);
9987                     if (put_user_s32(status, status_ptr))
9988                         return -TARGET_EFAULT;
9989                 }
9990                 if (target_rusage) {
9991                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9992                     if (rusage_err) {
9993                         ret = rusage_err;
9994                     }
9995                 }
9996             }
9997         }
9998         return ret;
9999 #ifdef TARGET_NR_swapoff
10000     case TARGET_NR_swapoff:
10001         if (!(p = lock_user_string(arg1)))
10002             return -TARGET_EFAULT;
10003         ret = get_errno(swapoff(p));
10004         unlock_user(p, arg1, 0);
10005         return ret;
10006 #endif
10007     case TARGET_NR_sysinfo:
10008         {
10009             struct target_sysinfo *target_value;
10010             struct sysinfo value;
10011             ret = get_errno(sysinfo(&value));
10012             if (!is_error(ret) && arg1)
10013             {
10014                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10015                     return -TARGET_EFAULT;
10016                 __put_user(value.uptime, &target_value->uptime);
10017                 __put_user(value.loads[0], &target_value->loads[0]);
10018                 __put_user(value.loads[1], &target_value->loads[1]);
10019                 __put_user(value.loads[2], &target_value->loads[2]);
10020                 __put_user(value.totalram, &target_value->totalram);
10021                 __put_user(value.freeram, &target_value->freeram);
10022                 __put_user(value.sharedram, &target_value->sharedram);
10023                 __put_user(value.bufferram, &target_value->bufferram);
10024                 __put_user(value.totalswap, &target_value->totalswap);
10025                 __put_user(value.freeswap, &target_value->freeswap);
10026                 __put_user(value.procs, &target_value->procs);
10027                 __put_user(value.totalhigh, &target_value->totalhigh);
10028                 __put_user(value.freehigh, &target_value->freehigh);
10029                 __put_user(value.mem_unit, &target_value->mem_unit);
10030                 unlock_user_struct(target_value, arg1, 1);
10031             }
10032         }
10033         return ret;
10034 #ifdef TARGET_NR_ipc
10035     case TARGET_NR_ipc:
10036         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10037 #endif
10038 #ifdef TARGET_NR_semget
10039     case TARGET_NR_semget:
10040         return get_errno(semget(arg1, arg2, arg3));
10041 #endif
10042 #ifdef TARGET_NR_semop
10043     case TARGET_NR_semop:
10044         return do_semop(arg1, arg2, arg3);
10045 #endif
10046 #ifdef TARGET_NR_semctl
10047     case TARGET_NR_semctl:
10048         return do_semctl(arg1, arg2, arg3, arg4);
10049 #endif
10050 #ifdef TARGET_NR_msgctl
10051     case TARGET_NR_msgctl:
10052         return do_msgctl(arg1, arg2, arg3);
10053 #endif
10054 #ifdef TARGET_NR_msgget
10055     case TARGET_NR_msgget:
10056         return get_errno(msgget(arg1, arg2));
10057 #endif
10058 #ifdef TARGET_NR_msgrcv
10059     case TARGET_NR_msgrcv:
10060         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10061 #endif
10062 #ifdef TARGET_NR_msgsnd
10063     case TARGET_NR_msgsnd:
10064         return do_msgsnd(arg1, arg2, arg3, arg4);
10065 #endif
10066 #ifdef TARGET_NR_shmget
10067     case TARGET_NR_shmget:
10068         return get_errno(shmget(arg1, arg2, arg3));
10069 #endif
10070 #ifdef TARGET_NR_shmctl
10071     case TARGET_NR_shmctl:
10072         return do_shmctl(arg1, arg2, arg3);
10073 #endif
10074 #ifdef TARGET_NR_shmat
10075     case TARGET_NR_shmat:
10076         return do_shmat(cpu_env, arg1, arg2, arg3);
10077 #endif
10078 #ifdef TARGET_NR_shmdt
10079     case TARGET_NR_shmdt:
10080         return do_shmdt(arg1);
10081 #endif
10082     case TARGET_NR_fsync:
10083         return get_errno(fsync(arg1));
10084     case TARGET_NR_clone:
10085         /* Linux manages to have three different orderings for its
10086          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10087          * match the kernel's CONFIG_CLONE_* settings.
10088          * Microblaze is further special in that it uses a sixth
10089          * implicit argument to clone for the TLS pointer.
10090          */
10091 #if defined(TARGET_MICROBLAZE)
10092         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10093 #elif defined(TARGET_CLONE_BACKWARDS)
10094         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10095 #elif defined(TARGET_CLONE_BACKWARDS2)
10096         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10097 #else
10098         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10099 #endif
10100         return ret;
10101 #ifdef __NR_exit_group
10102         /* new thread calls */
10103     case TARGET_NR_exit_group:
10104         preexit_cleanup(cpu_env, arg1);
10105         return get_errno(exit_group(arg1));
10106 #endif
10107     case TARGET_NR_setdomainname:
10108         if (!(p = lock_user_string(arg1)))
10109             return -TARGET_EFAULT;
10110         ret = get_errno(setdomainname(p, arg2));
10111         unlock_user(p, arg1, 0);
10112         return ret;
10113     case TARGET_NR_uname:
10114         /* no need to transcode because we use the linux syscall */
10115         {
10116             struct new_utsname * buf;
10117 
10118             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10119                 return -TARGET_EFAULT;
10120             ret = get_errno(sys_uname(buf));
10121             if (!is_error(ret)) {
10122                 /* Overwrite the native machine name with whatever is being
10123                    emulated. */
10124                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10125                           sizeof(buf->machine));
10126                 /* Allow the user to override the reported release.  */
10127                 if (qemu_uname_release && *qemu_uname_release) {
10128                     g_strlcpy(buf->release, qemu_uname_release,
10129                               sizeof(buf->release));
10130                 }
10131             }
10132             unlock_user_struct(buf, arg1, 1);
10133         }
10134         return ret;
10135 #ifdef TARGET_I386
10136     case TARGET_NR_modify_ldt:
10137         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10138 #if !defined(TARGET_X86_64)
10139     case TARGET_NR_vm86:
10140         return do_vm86(cpu_env, arg1, arg2);
10141 #endif
10142 #endif
10143     case TARGET_NR_adjtimex:
10144         {
10145             struct timex host_buf;
10146 
10147             if (target_to_host_timex(&host_buf, arg1) != 0) {
10148                 return -TARGET_EFAULT;
10149             }
10150             ret = get_errno(adjtimex(&host_buf));
10151             if (!is_error(ret)) {
10152                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10153                     return -TARGET_EFAULT;
10154                 }
10155             }
10156         }
10157         return ret;
10158 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10159     case TARGET_NR_clock_adjtime:
10160         {
10161             struct timex htx, *phtx = &htx;
10162 
10163             if (target_to_host_timex(phtx, arg2) != 0) {
10164                 return -TARGET_EFAULT;
10165             }
10166             ret = get_errno(clock_adjtime(arg1, phtx));
10167             if (!is_error(ret) && phtx) {
10168                 if (host_to_target_timex(arg2, phtx) != 0) {
10169                     return -TARGET_EFAULT;
10170                 }
10171             }
10172         }
10173         return ret;
10174 #endif
10175     case TARGET_NR_getpgid:
10176         return get_errno(getpgid(arg1));
10177     case TARGET_NR_fchdir:
10178         return get_errno(fchdir(arg1));
10179     case TARGET_NR_personality:
10180         return get_errno(personality(arg1));
10181 #ifdef TARGET_NR__llseek /* Not on alpha */
10182     case TARGET_NR__llseek:
10183         {
10184             int64_t res;
10185 #if !defined(__NR_llseek)
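                  /* The host has no llseek syscall (64-bit host): combine the
                   * two 32-bit halves into one offset and use plain lseek().
                   */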
10186             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10187             if (res == -1) {
10188                 ret = get_errno(res);
10189             } else {
10190                 ret = 0;
10191             }
10192 #else
10193             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10194 #endif
10195             if ((ret == 0) && put_user_s64(res, arg4)) {
10196                 return -TARGET_EFAULT;
10197             }
10198         }
10199         return ret;
10200 #endif
10201 #ifdef TARGET_NR_getdents
10202     case TARGET_NR_getdents:
10203 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10204 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
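              /* On a 64-bit host struct linux_dirent carries 64-bit d_ino and
               * d_off, while the 32-bit target expects abi_long fields, so
               * read into a scratch buffer and repack each record below.
               */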
10205         {
10206             struct target_dirent *target_dirp;
10207             struct linux_dirent *dirp;
10208             abi_long count = arg3;
10209 
10210             dirp = g_try_malloc(count);
10211             if (!dirp) {
10212                 return -TARGET_ENOMEM;
10213             }
10214 
10215             ret = get_errno(sys_getdents(arg1, dirp, count));
10216             if (!is_error(ret)) {
10217                 struct linux_dirent *de;
10218                 struct target_dirent *tde;
10219                 int len = ret;
10220                 int reclen, treclen;
10221                 int count1, tnamelen;
10222
10223                 count1 = 0;
10224                 de = dirp;
10225                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10226                     return -TARGET_EFAULT;
10227                 tde = target_dirp;
10228                 while (len > 0) {
10229                     reclen = de->d_reclen;
10230                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10231                     assert(tnamelen >= 0);
10232                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10233                     assert(count1 + treclen <= count);
10234                     tde->d_reclen = tswap16(treclen);
10235                     tde->d_ino = tswapal(de->d_ino);
10236                     tde->d_off = tswapal(de->d_off);
10237                     memcpy(tde->d_name, de->d_name, tnamelen);
10238                     de = (struct linux_dirent *)((char *)de + reclen);
10239                     len -= reclen;
10240                     tde = (struct target_dirent *)((char *)tde + treclen);
10241                     count1 += treclen;
10242                 }
10243                 ret = count1;
10244                 unlock_user(target_dirp, arg2, ret);
10245             }
10246             g_free(dirp);
10247         }
10248 #else
10249         {
10250             struct linux_dirent *dirp;
10251             abi_long count = arg3;
10252 
10253             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10254                 return -TARGET_EFAULT;
10255             ret = get_errno(sys_getdents(arg1, dirp, count));
10256             if (!is_error(ret)) {
10257                 struct linux_dirent *de;
10258                 int len = ret;
10259                 int reclen;
10260                 de = dirp;
10261                 while (len > 0) {
10262                     reclen = de->d_reclen;
10263                     if (reclen > len)
10264                         break;
10265                     de->d_reclen = tswap16(reclen);
10266                     tswapls(&de->d_ino);
10267                     tswapls(&de->d_off);
10268                     de = (struct linux_dirent *)((char *)de + reclen);
10269                     len -= reclen;
10270                 }
10271             }
10272             unlock_user(dirp, arg2, ret);
10273         }
10274 #endif
10275 #else
10276         /* Implement getdents in terms of getdents64 */
10277         {
10278             struct linux_dirent64 *dirp;
10279             abi_long count = arg3;
10280 
10281             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10282             if (!dirp) {
10283                 return -TARGET_EFAULT;
10284             }
10285             ret = get_errno(sys_getdents64(arg1, dirp, count));
10286             if (!is_error(ret)) {
10287                 /* Convert the dirent64 structs to target dirent.  We do this
10288                  * in-place, since we can guarantee that a target_dirent is no
10289                  * larger than a dirent64; however this means we have to be
10290                  * careful to read everything before writing in the new format.
10291                  */
10292                 struct linux_dirent64 *de;
10293                 struct target_dirent *tde;
10294                 int len = ret;
10295                 int tlen = 0;
10296 
10297                 de = dirp;
10298                 tde = (struct target_dirent *)dirp;
10299                 while (len > 0) {
10300                     int namelen, treclen;
10301                     int reclen = de->d_reclen;
10302                     uint64_t ino = de->d_ino;
10303                     int64_t off = de->d_off;
10304                     uint8_t type = de->d_type;
10305 
10306                     namelen = strlen(de->d_name);
10307                     treclen = offsetof(struct target_dirent, d_name)
10308                         + namelen + 2;
10309                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10310 
10311                     memmove(tde->d_name, de->d_name, namelen + 1);
10312                     tde->d_ino = tswapal(ino);
10313                     tde->d_off = tswapal(off);
10314                     tde->d_reclen = tswap16(treclen);
10315                     /* The target_dirent type is in what was formerly a padding
10316                      * byte at the end of the structure:
10317                      */
10318                     *(((char *)tde) + treclen - 1) = type;
10319 
10320                     de = (struct linux_dirent64 *)((char *)de + reclen);
10321                     tde = (struct target_dirent *)((char *)tde + treclen);
10322                     len -= reclen;
10323                     tlen += treclen;
10324                 }
10325                 ret = tlen;
10326             }
10327             unlock_user(dirp, arg2, ret);
10328         }
10329 #endif
10330         return ret;
10331 #endif /* TARGET_NR_getdents */
10332 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10333     case TARGET_NR_getdents64:
10334         {
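                  /* linux_dirent64 has the same layout on host and target, so
                   * only the multi-byte fields need byte-swapping in place.
                   */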
10335             struct linux_dirent64 *dirp;
10336             abi_long count = arg3;
10337             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10338                 return -TARGET_EFAULT;
10339             ret = get_errno(sys_getdents64(arg1, dirp, count));
10340             if (!is_error(ret)) {
10341                 struct linux_dirent64 *de;
10342                 int len = ret;
10343                 int reclen;
10344                 de = dirp;
10345                 while (len > 0) {
10346                     reclen = de->d_reclen;
10347                     if (reclen > len)
10348                         break;
10349                     de->d_reclen = tswap16(reclen);
10350                     tswap64s((uint64_t *)&de->d_ino);
10351                     tswap64s((uint64_t *)&de->d_off);
10352                     de = (struct linux_dirent64 *)((char *)de + reclen);
10353                     len -= reclen;
10354                 }
10355             }
10356             unlock_user(dirp, arg2, ret);
10357         }
10358         return ret;
10359 #endif /* TARGET_NR_getdents64 */
10360 #if defined(TARGET_NR__newselect)
10361     case TARGET_NR__newselect:
10362         return do_select(arg1, arg2, arg3, arg4, arg5);
10363 #endif
10364 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10365 # ifdef TARGET_NR_poll
10366     case TARGET_NR_poll:
10367 # endif
10368 # ifdef TARGET_NR_ppoll
10369     case TARGET_NR_ppoll:
10370 # endif
10371         {
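                  /* poll and ppoll share this code: convert the guest pollfd
                   * array, dispatch on num for the timeout/sigset handling,
                   * then copy the revents fields back out below.
                   */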
10372             struct target_pollfd *target_pfd;
10373             unsigned int nfds = arg2;
10374             struct pollfd *pfd;
10375             unsigned int i;
10376 
10377             pfd = NULL;
10378             target_pfd = NULL;
10379             if (nfds) {
10380                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10381                     return -TARGET_EINVAL;
10382                 }
10383 
10384                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10385                                        sizeof(struct target_pollfd) * nfds, 1);
10386                 if (!target_pfd) {
10387                     return -TARGET_EFAULT;
10388                 }
10389 
10390                 pfd = alloca(sizeof(struct pollfd) * nfds);
10391                 for (i = 0; i < nfds; i++) {
10392                     pfd[i].fd = tswap32(target_pfd[i].fd);
10393                     pfd[i].events = tswap16(target_pfd[i].events);
10394                 }
10395             }
10396 
10397             switch (num) {
10398 # ifdef TARGET_NR_ppoll
10399             case TARGET_NR_ppoll:
10400             {
10401                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10402                 target_sigset_t *target_set;
10403                 sigset_t _set, *set = &_set;
10404 
10405                 if (arg3) {
10406                     if (target_to_host_timespec(timeout_ts, arg3)) {
10407                         unlock_user(target_pfd, arg1, 0);
10408                         return -TARGET_EFAULT;
10409                     }
10410                 } else {
10411                     timeout_ts = NULL;
10412                 }
10413 
10414                 if (arg4) {
10415                     if (arg5 != sizeof(target_sigset_t)) {
10416                         unlock_user(target_pfd, arg1, 0);
10417                         return -TARGET_EINVAL;
10418                     }
10419 
10420                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10421                     if (!target_set) {
10422                         unlock_user(target_pfd, arg1, 0);
10423                         return -TARGET_EFAULT;
10424                     }
10425                     target_to_host_sigset(set, target_set);
10426                 } else {
10427                     set = NULL;
10428                 }
10429 
10430                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10431                                            set, SIGSET_T_SIZE));
10432 
10433                 if (!is_error(ret) && arg3) {
10434                     host_to_target_timespec(arg3, timeout_ts);
10435                 }
10436                 if (arg4) {
10437                     unlock_user(target_set, arg4, 0);
10438                 }
10439                 break;
10440             }
10441 # endif
10442 # ifdef TARGET_NR_poll
10443             case TARGET_NR_poll:
10444             {
10445                 struct timespec ts, *pts;
10446 
10447                 if (arg3 >= 0) {
10448                     /* Convert ms to secs, ns */
10449                     ts.tv_sec = arg3 / 1000;
10450                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10451                     pts = &ts;
10452                 } else {
10453                     /* A negative poll() timeout means "infinite" */
10454                     pts = NULL;
10455                 }
10456                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10457                 break;
10458             }
10459 # endif
10460             default:
10461                 g_assert_not_reached();
10462             }
10463 
10464             if (!is_error(ret)) {
10465                 for (i = 0; i < nfds; i++) {
10466                     target_pfd[i].revents = tswap16(pfd[i].revents);
10467                 }
10468             }
10469             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10470         }
10471         return ret;
10472 #endif
10473     case TARGET_NR_flock:
10474         /* NOTE: the flock constant seems to be the same for every
10475            Linux platform */
10476         return get_errno(safe_flock(arg1, arg2));
10477     case TARGET_NR_readv:
10478         {
10479             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10480             if (vec != NULL) {
10481                 ret = get_errno(safe_readv(arg1, vec, arg3));
10482                 unlock_iovec(vec, arg2, arg3, 1);
10483             } else {
10484                 ret = -host_to_target_errno(errno);
10485             }
10486         }
10487         return ret;
10488     case TARGET_NR_writev:
10489         {
10490             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10491             if (vec != NULL) {
10492                 ret = get_errno(safe_writev(arg1, vec, arg3));
10493                 unlock_iovec(vec, arg2, arg3, 0);
10494             } else {
10495                 ret = -host_to_target_errno(errno);
10496             }
10497         }
10498         return ret;
10499 #if defined(TARGET_NR_preadv)
10500     case TARGET_NR_preadv:
10501         {
10502             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10503             if (vec != NULL) {
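                      /* arg4/arg5 carry the 64-bit file offset split into low
                       * and high halves per the target ABI; reassemble it into
                       * the form the host preadv expects.
                       */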
10504                 unsigned long low, high;
10505 
10506                 target_to_host_low_high(arg4, arg5, &low, &high);
10507                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10508                 unlock_iovec(vec, arg2, arg3, 1);
10509             } else {
10510                 ret = -host_to_target_errno(errno);
10511            }
10512         }
10513         return ret;
10514 #endif
10515 #if defined(TARGET_NR_pwritev)
10516     case TARGET_NR_pwritev:
10517         {
10518             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10519             if (vec != NULL) {
10520                 unsigned long low, high;
10521 
10522                 target_to_host_low_high(arg4, arg5, &low, &high);
10523                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10524                 unlock_iovec(vec, arg2, arg3, 0);
10525             } else {
10526                 ret = -host_to_target_errno(errno);
10527            }
10528         }
10529         return ret;
10530 #endif
10531     case TARGET_NR_getsid:
10532         return get_errno(getsid(arg1));
10533 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10534     case TARGET_NR_fdatasync:
10535         return get_errno(fdatasync(arg1));
10536 #endif
10537 #ifdef TARGET_NR__sysctl
10538     case TARGET_NR__sysctl:
10539         /* We don't implement this, but ENOTDIR is always a safe
10540            return value. */
10541         return -TARGET_ENOTDIR;
10542 #endif
10543     case TARGET_NR_sched_getaffinity:
10544         {
10545             unsigned int mask_size;
10546             unsigned long *mask;
10547 
10548             /*
10549              * sched_getaffinity needs multiples of ulong, so need to take
10550              * care of mismatches between target ulong and host ulong sizes.
10551              */
10552             if (arg2 & (sizeof(abi_ulong) - 1)) {
10553                 return -TARGET_EINVAL;
10554             }
10555             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10556 
10557             mask = alloca(mask_size);
10558             memset(mask, 0, mask_size);
10559             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10560 
10561             if (!is_error(ret)) {
10562                 if (ret > arg2) {
10563                     /* More data returned than the caller's buffer will fit.
10564                      * This only happens if sizeof(abi_long) < sizeof(long)
10565                      * and the caller passed us a buffer holding an odd number
10566                      * of abi_longs. If the host kernel is actually using the
10567                      * extra 4 bytes then fail EINVAL; otherwise we can just
10568                      * ignore them and only copy the interesting part.
10569                      */
10570                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10571                     if (numcpus > arg2 * 8) {
10572                         return -TARGET_EINVAL;
10573                     }
10574                     ret = arg2;
10575                 }
10576 
10577                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10578                     return -TARGET_EFAULT;
10579                 }
10580             }
10581         }
10582         return ret;
10583     case TARGET_NR_sched_setaffinity:
10584         {
10585             unsigned int mask_size;
10586             unsigned long *mask;
10587 
10588             /*
10589              * sched_setaffinity needs multiples of ulong, so need to take
10590              * care of mismatches between target ulong and host ulong sizes.
10591              */
10592             if (arg2 & (sizeof(abi_ulong) - 1)) {
10593                 return -TARGET_EINVAL;
10594             }
10595             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10596             mask = alloca(mask_size);
10597 
10598             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10599             if (ret) {
10600                 return ret;
10601             }
10602 
10603             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10604         }
10605     case TARGET_NR_getcpu:
10606         {
10607             unsigned cpu, node;
10608             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10609                                        arg2 ? &node : NULL,
10610                                        NULL));
10611             if (is_error(ret)) {
10612                 return ret;
10613             }
10614             if (arg1 && put_user_u32(cpu, arg1)) {
10615                 return -TARGET_EFAULT;
10616             }
10617             if (arg2 && put_user_u32(node, arg2)) {
10618                 return -TARGET_EFAULT;
10619             }
10620         }
10621         return ret;
10622     case TARGET_NR_sched_setparam:
10623         {
10624             struct sched_param *target_schp;
10625             struct sched_param schp;
10626 
10627             if (arg2 == 0) {
10628                 return -TARGET_EINVAL;
10629             }
10630             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10631                 return -TARGET_EFAULT;
10632             schp.sched_priority = tswap32(target_schp->sched_priority);
10633             unlock_user_struct(target_schp, arg2, 0);
10634             return get_errno(sched_setparam(arg1, &schp));
10635         }
10636     case TARGET_NR_sched_getparam:
10637         {
10638             struct sched_param *target_schp;
10639             struct sched_param schp;
10640 
10641             if (arg2 == 0) {
10642                 return -TARGET_EINVAL;
10643             }
10644             ret = get_errno(sched_getparam(arg1, &schp));
10645             if (!is_error(ret)) {
10646                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10647                     return -TARGET_EFAULT;
10648                 target_schp->sched_priority = tswap32(schp.sched_priority);
10649                 unlock_user_struct(target_schp, arg2, 1);
10650             }
10651         }
10652         return ret;
10653     case TARGET_NR_sched_setscheduler:
10654         {
10655             struct sched_param *target_schp;
10656             struct sched_param schp;
10657             if (arg3 == 0) {
10658                 return -TARGET_EINVAL;
10659             }
10660             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10661                 return -TARGET_EFAULT;
10662             schp.sched_priority = tswap32(target_schp->sched_priority);
10663             unlock_user_struct(target_schp, arg3, 0);
10664             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10665         }
10666     case TARGET_NR_sched_getscheduler:
10667         return get_errno(sched_getscheduler(arg1));
10668     case TARGET_NR_sched_yield:
10669         return get_errno(sched_yield());
10670     case TARGET_NR_sched_get_priority_max:
10671         return get_errno(sched_get_priority_max(arg1));
10672     case TARGET_NR_sched_get_priority_min:
10673         return get_errno(sched_get_priority_min(arg1));
10674     case TARGET_NR_sched_rr_get_interval:
10675         {
10676             struct timespec ts;
10677             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10678             if (!is_error(ret)) {
10679                 ret = host_to_target_timespec(arg2, &ts);
10680             }
10681         }
10682         return ret;
10683     case TARGET_NR_nanosleep:
10684         {
10685             struct timespec req, rem;
10686             target_to_host_timespec(&req, arg1);
10687             ret = get_errno(safe_nanosleep(&req, &rem));
10688             if (is_error(ret) && arg2) {
10689                 host_to_target_timespec(arg2, &rem);
10690             }
10691         }
10692         return ret;
10693     case TARGET_NR_prctl:
10694         switch (arg1) {
10695         case PR_GET_PDEATHSIG:
10696         {
10697             int deathsig;
10698             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10699             if (!is_error(ret) && arg2
10700                 && put_user_ual(deathsig, arg2)) {
10701                 return -TARGET_EFAULT;
10702             }
10703             return ret;
10704         }
10705 #ifdef PR_GET_NAME
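        /* The thread name is TASK_COMM_LEN (16) bytes, trailing NUL included. */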
10706         case PR_GET_NAME:
10707         {
10708             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10709             if (!name) {
10710                 return -TARGET_EFAULT;
10711             }
10712             ret = get_errno(prctl(arg1, (unsigned long)name,
10713                                   arg3, arg4, arg5));
10714             unlock_user(name, arg2, 16);
10715             return ret;
10716         }
10717         case PR_SET_NAME:
10718         {
10719             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10720             if (!name) {
10721                 return -TARGET_EFAULT;
10722             }
10723             ret = get_errno(prctl(arg1, (unsigned long)name,
10724                                   arg3, arg4, arg5));
10725             unlock_user(name, arg2, 0);
10726             return ret;
10727         }
10728 #endif
10729 #ifdef TARGET_AARCH64
10730         case TARGET_PR_SVE_SET_VL:
10731             /*
10732              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10733              * PR_SVE_VL_INHERIT.  Note the kernel definition
10734              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10735              * even though the current architectural maximum is VQ=16.
10736              */
10737             ret = -TARGET_EINVAL;
10738             if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10739                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10740                 CPUARMState *env = cpu_env;
10741                 ARMCPU *cpu = arm_env_get_cpu(env);
10742                 uint32_t vq, old_vq;
10743 
10744                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10745                 vq = MAX(arg2 / 16, 1);
10746                 vq = MIN(vq, cpu->sve_max_vq);
10747 
10748                 if (vq < old_vq) {
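                /*
                 * ZCR_EL1.LEN stores (vq - 1); only narrowing needs the
                 * vector state trimmed, and the result reported back is
                 * the new vector length in bytes (vq * 16).
                 */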
10749                     aarch64_sve_narrow_vq(env, vq);
10750                 }
10751                 env->vfp.zcr_el[1] = vq - 1;
10752                 ret = vq * 16;
10753             }
10754             return ret;
10755         case TARGET_PR_SVE_GET_VL:
10756             ret = -TARGET_EINVAL;
10757             if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10758                 CPUARMState *env = cpu_env;
10759                 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10760             }
10761             return ret;
10762 #endif /* AARCH64 */
10763         case PR_GET_SECCOMP:
10764         case PR_SET_SECCOMP:
10765             /* Disable seccomp to prevent the target disabling syscalls we
10766              * need. */
10767             return -TARGET_EINVAL;
10768         default:
10769             /* Most prctl options have no pointer arguments */
10770             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10771         }
10772         break;
10773 #ifdef TARGET_NR_arch_prctl
10774     case TARGET_NR_arch_prctl:
10775 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10776         return do_arch_prctl(cpu_env, arg1, arg2);
10777 #else
10778 #error unreachable
10779 #endif
10780 #endif
10781 #ifdef TARGET_NR_pread64
10782     case TARGET_NR_pread64:
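        /*
         * On 32-bit ABIs the 64-bit offset arrives as a register pair;
         * regpairs_aligned() handles ABIs that require such pairs to start
         * on an even-numbered register, which shifts the arguments up by
         * one slot.
         */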
10783         if (regpairs_aligned(cpu_env, num)) {
10784             arg4 = arg5;
10785             arg5 = arg6;
10786         }
10787         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10788             return -TARGET_EFAULT;
10789         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10790         unlock_user(p, arg2, ret);
10791         return ret;
10792     case TARGET_NR_pwrite64:
10793         if (regpairs_aligned(cpu_env, num)) {
10794             arg4 = arg5;
10795             arg5 = arg6;
10796         }
10797         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10798             return -TARGET_EFAULT;
10799         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10800         unlock_user(p, arg2, 0);
10801         return ret;
10802 #endif
10803     case TARGET_NR_getcwd:
10804         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10805             return -TARGET_EFAULT;
10806         ret = get_errno(sys_getcwd1(p, arg2));
10807         unlock_user(p, arg1, ret);
10808         return ret;
10809     case TARGET_NR_capget:
10810     case TARGET_NR_capset:
10811     {
10812         struct target_user_cap_header *target_header;
10813         struct target_user_cap_data *target_data = NULL;
10814         struct __user_cap_header_struct header;
10815         struct __user_cap_data_struct data[2];
10816         struct __user_cap_data_struct *dataptr = NULL;
10817         int i, target_datalen;
10818         int data_items = 1;
10819 
10820         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10821             return -TARGET_EFAULT;
10822         }
10823         header.version = tswap32(target_header->version);
10824         header.pid = tswap32(target_header->pid);
10825 
10826         if (header.version != _LINUX_CAPABILITY_VERSION) {
10827             /* Version 2 and up takes pointer to two user_data structs */
10828             data_items = 2;
10829         }
10830 
10831         target_datalen = sizeof(*target_data) * data_items;
10832 
10833         if (arg2) {
10834             if (num == TARGET_NR_capget) {
10835                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10836             } else {
10837                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10838             }
10839             if (!target_data) {
10840                 unlock_user_struct(target_header, arg1, 0);
10841                 return -TARGET_EFAULT;
10842             }
10843 
10844             if (num == TARGET_NR_capset) {
10845                 for (i = 0; i < data_items; i++) {
10846                     data[i].effective = tswap32(target_data[i].effective);
10847                     data[i].permitted = tswap32(target_data[i].permitted);
10848                     data[i].inheritable = tswap32(target_data[i].inheritable);
10849                 }
10850             }
10851 
10852             dataptr = data;
10853         }
10854 
10855         if (num == TARGET_NR_capget) {
10856             ret = get_errno(capget(&header, dataptr));
10857         } else {
10858             ret = get_errno(capset(&header, dataptr));
10859         }
10860 
10861         /* The kernel always updates version for both capget and capset */
10862         target_header->version = tswap32(header.version);
10863         unlock_user_struct(target_header, arg1, 1);
10864 
10865         if (arg2) {
10866             if (num == TARGET_NR_capget) {
10867                 for (i = 0; i < data_items; i++) {
10868                     target_data[i].effective = tswap32(data[i].effective);
10869                     target_data[i].permitted = tswap32(data[i].permitted);
10870                     target_data[i].inheritable = tswap32(data[i].inheritable);
10871                 }
10872                 unlock_user(target_data, arg2, target_datalen);
10873             } else {
10874                 unlock_user(target_data, arg2, 0);
10875             }
10876         }
10877         return ret;
10878     }
10879     case TARGET_NR_sigaltstack:
10880         return do_sigaltstack(arg1, arg2,
10881                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10882 
10883 #ifdef CONFIG_SENDFILE
10884 #ifdef TARGET_NR_sendfile
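    /*
     * sendfile's offset argument is an optional in/out pointer: read it
     * from guest memory when supplied, and write the updated value back
     * so the guest can see how far the transfer advanced.
     */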
10885     case TARGET_NR_sendfile:
10886     {
10887         off_t *offp = NULL;
10888         off_t off;
10889         if (arg3) {
10890             ret = get_user_sal(off, arg3);
10891             if (is_error(ret)) {
10892                 return ret;
10893             }
10894             offp = &off;
10895         }
10896         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10897         if (!is_error(ret) && arg3) {
10898             abi_long ret2 = put_user_sal(off, arg3);
10899             if (is_error(ret2)) {
10900                 ret = ret2;
10901             }
10902         }
10903         return ret;
10904     }
10905 #endif
10906 #ifdef TARGET_NR_sendfile64
10907     case TARGET_NR_sendfile64:
10908     {
10909         off_t *offp = NULL;
10910         off_t off;
10911         if (arg3) {
10912             ret = get_user_s64(off, arg3);
10913             if (is_error(ret)) {
10914                 return ret;
10915             }
10916             offp = &off;
10917         }
10918         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10919         if (!is_error(ret) && arg3) {
10920             abi_long ret2 = put_user_s64(off, arg3);
10921             if (is_error(ret2)) {
10922                 ret = ret2;
10923             }
10924         }
10925         return ret;
10926     }
10927 #endif
10928 #endif
10929 #ifdef TARGET_NR_vfork
10930     case TARGET_NR_vfork:
10931         return get_errno(do_fork(cpu_env,
10932                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10933                          0, 0, 0, 0));
10934 #endif
10935 #ifdef TARGET_NR_ugetrlimit
10936     case TARGET_NR_ugetrlimit:
10937     {
10938         struct rlimit rlim;
10939         int resource = target_to_host_resource(arg1);
10940         ret = get_errno(getrlimit(resource, &rlim));
10941         if (!is_error(ret)) {
10942             struct target_rlimit *target_rlim;
10943             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10944                 return -TARGET_EFAULT;
10945             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10946             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10947             unlock_user_struct(target_rlim, arg2, 1);
10948         }
10949         return ret;
10950     }
10951 #endif
10952 #ifdef TARGET_NR_truncate64
10953     case TARGET_NR_truncate64:
10954         if (!(p = lock_user_string(arg1)))
10955             return -TARGET_EFAULT;
10956         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10957         unlock_user(p, arg1, 0);
10958         return ret;
10959 #endif
10960 #ifdef TARGET_NR_ftruncate64
10961     case TARGET_NR_ftruncate64:
10962         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10963 #endif
10964 #ifdef TARGET_NR_stat64
10965     case TARGET_NR_stat64:
10966         if (!(p = lock_user_string(arg1))) {
10967             return -TARGET_EFAULT;
10968         }
10969         ret = get_errno(stat(path(p), &st));
10970         unlock_user(p, arg1, 0);
10971         if (!is_error(ret))
10972             ret = host_to_target_stat64(cpu_env, arg2, &st);
10973         return ret;
10974 #endif
10975 #ifdef TARGET_NR_lstat64
10976     case TARGET_NR_lstat64:
10977         if (!(p = lock_user_string(arg1))) {
10978             return -TARGET_EFAULT;
10979         }
10980         ret = get_errno(lstat(path(p), &st));
10981         unlock_user(p, arg1, 0);
10982         if (!is_error(ret))
10983             ret = host_to_target_stat64(cpu_env, arg2, &st);
10984         return ret;
10985 #endif
10986 #ifdef TARGET_NR_fstat64
10987     case TARGET_NR_fstat64:
10988         ret = get_errno(fstat(arg1, &st));
10989         if (!is_error(ret))
10990             ret = host_to_target_stat64(cpu_env, arg2, &st);
10991         return ret;
10992 #endif
10993 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10994 #ifdef TARGET_NR_fstatat64
10995     case TARGET_NR_fstatat64:
10996 #endif
10997 #ifdef TARGET_NR_newfstatat
10998     case TARGET_NR_newfstatat:
10999 #endif
11000         if (!(p = lock_user_string(arg2))) {
11001             return -TARGET_EFAULT;
11002         }
11003         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11004         unlock_user(p, arg2, 0);
11005         if (!is_error(ret))
11006             ret = host_to_target_stat64(cpu_env, arg3, &st);
11007         return ret;
11008 #endif
11009 #ifdef TARGET_NR_lchown
11010     case TARGET_NR_lchown:
11011         if (!(p = lock_user_string(arg1)))
11012             return -TARGET_EFAULT;
11013         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11014         unlock_user(p, arg1, 0);
11015         return ret;
11016 #endif
11017 #ifdef TARGET_NR_getuid
11018     case TARGET_NR_getuid:
11019         return get_errno(high2lowuid(getuid()));
11020 #endif
11021 #ifdef TARGET_NR_getgid
11022     case TARGET_NR_getgid:
11023         return get_errno(high2lowgid(getgid()));
11024 #endif
11025 #ifdef TARGET_NR_geteuid
11026     case TARGET_NR_geteuid:
11027         return get_errno(high2lowuid(geteuid()));
11028 #endif
11029 #ifdef TARGET_NR_getegid
11030     case TARGET_NR_getegid:
11031         return get_errno(high2lowgid(getegid()));
11032 #endif
11033     case TARGET_NR_setreuid:
11034         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11035     case TARGET_NR_setregid:
11036         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
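    /*
     * The legacy getgroups/setgroups use the target's narrow gid type
     * (target_id), so each entry is byte-swapped and converted with the
     * high2lowgid/low2highgid helpers; the 32-bit variants further down
     * pass the ids through unchanged.
     */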
11037     case TARGET_NR_getgroups:
11038         {
11039             int gidsetsize = arg1;
11040             target_id *target_grouplist;
11041             gid_t *grouplist;
11042             int i;
11043 
11044             grouplist = alloca(gidsetsize * sizeof(gid_t));
11045             ret = get_errno(getgroups(gidsetsize, grouplist));
11046             if (gidsetsize == 0)
11047                 return ret;
11048             if (!is_error(ret)) {
11049                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11050                 if (!target_grouplist)
11051                     return -TARGET_EFAULT;
11052                 for (i = 0; i < ret; i++)
11053                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11054                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11055             }
11056         }
11057         return ret;
11058     case TARGET_NR_setgroups:
11059         {
11060             int gidsetsize = arg1;
11061             target_id *target_grouplist;
11062             gid_t *grouplist = NULL;
11063             int i;
11064             if (gidsetsize) {
11065                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11066                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11067                 if (!target_grouplist) {
11068                     return -TARGET_EFAULT;
11069                 }
11070                 for (i = 0; i < gidsetsize; i++) {
11071                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11072                 }
11073                 unlock_user(target_grouplist, arg2, 0);
11074             }
11075             return get_errno(setgroups(gidsetsize, grouplist));
11076         }
11077     case TARGET_NR_fchown:
11078         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11079 #if defined(TARGET_NR_fchownat)
11080     case TARGET_NR_fchownat:
11081         if (!(p = lock_user_string(arg2)))
11082             return -TARGET_EFAULT;
11083         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11084                                  low2highgid(arg4), arg5));
11085         unlock_user(p, arg2, 0);
11086         return ret;
11087 #endif
11088 #ifdef TARGET_NR_setresuid
11089     case TARGET_NR_setresuid:
11090         return get_errno(sys_setresuid(low2highuid(arg1),
11091                                        low2highuid(arg2),
11092                                        low2highuid(arg3)));
11093 #endif
11094 #ifdef TARGET_NR_getresuid
11095     case TARGET_NR_getresuid:
11096         {
11097             uid_t ruid, euid, suid;
11098             ret = get_errno(getresuid(&ruid, &euid, &suid));
11099             if (!is_error(ret)) {
11100                 if (put_user_id(high2lowuid(ruid), arg1)
11101                     || put_user_id(high2lowuid(euid), arg2)
11102                     || put_user_id(high2lowuid(suid), arg3))
11103                     return -TARGET_EFAULT;
11104             }
11105         }
11106         return ret;
11107 #endif
11108 #ifdef TARGET_NR_setresgid
11109     case TARGET_NR_setresgid:
11110         return get_errno(sys_setresgid(low2highgid(arg1),
11111                                        low2highgid(arg2),
11112                                        low2highgid(arg3)));
11113 #endif
11114 #ifdef TARGET_NR_getresgid
11115     case TARGET_NR_getresgid:
11116         {
11117             gid_t rgid, egid, sgid;
11118             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11119             if (!is_error(ret)) {
11120                 if (put_user_id(high2lowgid(rgid), arg1)
11121                     || put_user_id(high2lowgid(egid), arg2)
11122                     || put_user_id(high2lowgid(sgid), arg3))
11123                     return -TARGET_EFAULT;
11124             }
11125         }
11126         return ret;
11127 #endif
11128 #ifdef TARGET_NR_chown
11129     case TARGET_NR_chown:
11130         if (!(p = lock_user_string(arg1)))
11131             return -TARGET_EFAULT;
11132         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11133         unlock_user(p, arg1, 0);
11134         return ret;
11135 #endif
11136     case TARGET_NR_setuid:
11137         return get_errno(sys_setuid(low2highuid(arg1)));
11138     case TARGET_NR_setgid:
11139         return get_errno(sys_setgid(low2highgid(arg1)));
11140     case TARGET_NR_setfsuid:
11141         return get_errno(setfsuid(arg1));
11142     case TARGET_NR_setfsgid:
11143         return get_errno(setfsgid(arg1));
11144 
11145 #ifdef TARGET_NR_lchown32
11146     case TARGET_NR_lchown32:
11147         if (!(p = lock_user_string(arg1)))
11148             return -TARGET_EFAULT;
11149         ret = get_errno(lchown(p, arg2, arg3));
11150         unlock_user(p, arg1, 0);
11151         return ret;
11152 #endif
11153 #ifdef TARGET_NR_getuid32
11154     case TARGET_NR_getuid32:
11155         return get_errno(getuid());
11156 #endif
11157 
11158 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11159    /* Alpha specific */
11160     case TARGET_NR_getxuid:
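        /*
         * OSF getxuid returns two values: the real uid as the normal
         * syscall result and the effective uid in the a4 register, so
         * stash euid there before returning getuid().
         */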
11161         {
11162             uid_t euid;
11163             euid = geteuid();
11164             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11165         }
11166         return get_errno(getuid());
11167 #endif
11168 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11169    /* Alpha specific */
11170     case TARGET_NR_getxgid:
11171         {
11172             gid_t egid;
11173             egid = getegid();
11174             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11175         }
11176         return get_errno(getgid());
11177 #endif
11178 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11179     /* Alpha specific */
11180     case TARGET_NR_osf_getsysinfo:
11181         ret = -TARGET_EOPNOTSUPP;
11182         switch (arg1) {
11183           case TARGET_GSI_IEEE_FP_CONTROL:
11184             {
11185                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);
11186 
11187                 /* Copied from linux ieee_fpcr_to_swcr.  */
11188                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11189                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11190                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11191                                         | SWCR_TRAP_ENABLE_DZE
11192                                         | SWCR_TRAP_ENABLE_OVF);
11193                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11194                                         | SWCR_TRAP_ENABLE_INE);
11195                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11196                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11197 
11198                 if (put_user_u64(swcr, arg2))
11199                     return -TARGET_EFAULT;
11200                 ret = 0;
11201             }
11202             break;
11203 
11204           /* case GSI_IEEE_STATE_AT_SIGNAL:
11205              -- Not implemented in linux kernel.
11206              case GSI_UACPROC:
11207              -- Retrieves current unaligned access state; not much used.
11208              case GSI_PROC_TYPE:
11209              -- Retrieves implver information; surely not used.
11210              case GSI_GET_HWRPB:
11211              -- Grabs a copy of the HWRPB; surely not used.
11212           */
11213         }
11214         return ret;
11215 #endif
11216 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11217     /* Alpha specific */
11218     case TARGET_NR_osf_setsysinfo:
11219         ret = -TARGET_EOPNOTSUPP;
11220         switch (arg1) {
11221           case TARGET_SSI_IEEE_FP_CONTROL:
11222             {
11223                 uint64_t swcr, fpcr, orig_fpcr;
11224 
11225                 if (get_user_u64(swcr, arg2)) {
11226                     return -TARGET_EFAULT;
11227                 }
11228                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11229                 fpcr = orig_fpcr & FPCR_DYN_MASK;
11230 
11231                 /* Copied from linux ieee_swcr_to_fpcr.  */
11232                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11233                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11234                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11235                                   | SWCR_TRAP_ENABLE_DZE
11236                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
11237                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11238                                   | SWCR_TRAP_ENABLE_INE)) << 57;
11239                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11240                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11241 
11242                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11243                 ret = 0;
11244             }
11245             break;
11246 
11247           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11248             {
11249                 uint64_t exc, fpcr, orig_fpcr;
11250                 int si_code;
11251 
11252                 if (get_user_u64(exc, arg2)) {
11253                     return -TARGET_EFAULT;
11254                 }
11255 
11256                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11257 
11258                 /* We only add to the exception status here.  */
11259                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11260 
11261                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11262                 ret = 0;
11263 
11264                 /* Old exceptions are not signaled.  */
11265                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11266 
11267                 /* If any exceptions set by this call,
11268                    and are unmasked, send a signal.  */
11269                 si_code = 0;
11270                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11271                     si_code = TARGET_FPE_FLTRES;
11272                 }
11273                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11274                     si_code = TARGET_FPE_FLTUND;
11275                 }
11276                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11277                     si_code = TARGET_FPE_FLTOVF;
11278                 }
11279                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11280                     si_code = TARGET_FPE_FLTDIV;
11281                 }
11282                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11283                     si_code = TARGET_FPE_FLTINV;
11284                 }
11285                 if (si_code != 0) {
11286                     target_siginfo_t info;
11287                     info.si_signo = SIGFPE;
11288                     info.si_errno = 0;
11289                     info.si_code = si_code;
11290                     info._sifields._sigfault._addr
11291                         = ((CPUArchState *)cpu_env)->pc;
11292                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11293                                  QEMU_SI_FAULT, &info);
11294                 }
11295             }
11296             break;
11297 
11298           /* case SSI_NVPAIRS:
11299              -- Used with SSIN_UACPROC to enable unaligned accesses.
11300              case SSI_IEEE_STATE_AT_SIGNAL:
11301              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11302              -- Not implemented in linux kernel
11303           */
11304         }
11305         return ret;
11306 #endif
11307 #ifdef TARGET_NR_osf_sigprocmask
11308     /* Alpha specific.  */
11309     case TARGET_NR_osf_sigprocmask:
11310         {
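            /*
             * Unlike rt_sigprocmask there is no oldset pointer: the
             * previous mask is handed back as the syscall return value.
             */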
11311             abi_ulong mask;
11312             int how;
11313             sigset_t set, oldset;
11314 
11315             switch (arg1) {
11316             case TARGET_SIG_BLOCK:
11317                 how = SIG_BLOCK;
11318                 break;
11319             case TARGET_SIG_UNBLOCK:
11320                 how = SIG_UNBLOCK;
11321                 break;
11322             case TARGET_SIG_SETMASK:
11323                 how = SIG_SETMASK;
11324                 break;
11325             default:
11326                 return -TARGET_EINVAL;
11327             }
11328             mask = arg2;
11329             target_to_host_old_sigset(&set, &mask);
11330             ret = do_sigprocmask(how, &set, &oldset);
11331             if (!ret) {
11332                 host_to_target_old_sigset(&mask, &oldset);
11333                 ret = mask;
11334             }
11335         }
11336         return ret;
11337 #endif
11338 
11339 #ifdef TARGET_NR_getgid32
11340     case TARGET_NR_getgid32:
11341         return get_errno(getgid());
11342 #endif
11343 #ifdef TARGET_NR_geteuid32
11344     case TARGET_NR_geteuid32:
11345         return get_errno(geteuid());
11346 #endif
11347 #ifdef TARGET_NR_getegid32
11348     case TARGET_NR_getegid32:
11349         return get_errno(getegid());
11350 #endif
11351 #ifdef TARGET_NR_setreuid32
11352     case TARGET_NR_setreuid32:
11353         return get_errno(setreuid(arg1, arg2));
11354 #endif
11355 #ifdef TARGET_NR_setregid32
11356     case TARGET_NR_setregid32:
11357         return get_errno(setregid(arg1, arg2));
11358 #endif
11359 #ifdef TARGET_NR_getgroups32
11360     case TARGET_NR_getgroups32:
11361         {
11362             int gidsetsize = arg1;
11363             uint32_t *target_grouplist;
11364             gid_t *grouplist;
11365             int i;
11366 
11367             grouplist = alloca(gidsetsize * sizeof(gid_t));
11368             ret = get_errno(getgroups(gidsetsize, grouplist));
11369             if (gidsetsize == 0)
11370                 return ret;
11371             if (!is_error(ret)) {
11372                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11373                 if (!target_grouplist) {
11374                     return -TARGET_EFAULT;
11375                 }
11376                 for (i = 0; i < ret; i++)
11377                     target_grouplist[i] = tswap32(grouplist[i]);
11378                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11379             }
11380         }
11381         return ret;
11382 #endif
11383 #ifdef TARGET_NR_setgroups32
11384     case TARGET_NR_setgroups32:
11385         {
11386             int gidsetsize = arg1;
11387             uint32_t *target_grouplist;
11388             gid_t *grouplist;
11389             int i;
11390 
11391             grouplist = alloca(gidsetsize * sizeof(gid_t));
11392             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11393             if (!target_grouplist) {
11394                 return -TARGET_EFAULT;
11395             }
11396             for (i = 0; i < gidsetsize; i++)
11397                 grouplist[i] = tswap32(target_grouplist[i]);
11398             unlock_user(target_grouplist, arg2, 0);
11399             return get_errno(setgroups(gidsetsize, grouplist));
11400         }
11401 #endif
11402 #ifdef TARGET_NR_fchown32
11403     case TARGET_NR_fchown32:
11404         return get_errno(fchown(arg1, arg2, arg3));
11405 #endif
11406 #ifdef TARGET_NR_setresuid32
11407     case TARGET_NR_setresuid32:
11408         return get_errno(sys_setresuid(arg1, arg2, arg3));
11409 #endif
11410 #ifdef TARGET_NR_getresuid32
11411     case TARGET_NR_getresuid32:
11412         {
11413             uid_t ruid, euid, suid;
11414             ret = get_errno(getresuid(&ruid, &euid, &suid));
11415             if (!is_error(ret)) {
11416                 if (put_user_u32(ruid, arg1)
11417                     || put_user_u32(euid, arg2)
11418                     || put_user_u32(suid, arg3))
11419                     return -TARGET_EFAULT;
11420             }
11421         }
11422         return ret;
11423 #endif
11424 #ifdef TARGET_NR_setresgid32
11425     case TARGET_NR_setresgid32:
11426         return get_errno(sys_setresgid(arg1, arg2, arg3));
11427 #endif
11428 #ifdef TARGET_NR_getresgid32
11429     case TARGET_NR_getresgid32:
11430         {
11431             gid_t rgid, egid, sgid;
11432             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11433             if (!is_error(ret)) {
11434                 if (put_user_u32(rgid, arg1)
11435                     || put_user_u32(egid, arg2)
11436                     || put_user_u32(sgid, arg3))
11437                     return -TARGET_EFAULT;
11438             }
11439         }
11440         return ret;
11441 #endif
11442 #ifdef TARGET_NR_chown32
11443     case TARGET_NR_chown32:
11444         if (!(p = lock_user_string(arg1)))
11445             return -TARGET_EFAULT;
11446         ret = get_errno(chown(p, arg2, arg3));
11447         unlock_user(p, arg1, 0);
11448         return ret;
11449 #endif
11450 #ifdef TARGET_NR_setuid32
11451     case TARGET_NR_setuid32:
11452         return get_errno(sys_setuid(arg1));
11453 #endif
11454 #ifdef TARGET_NR_setgid32
11455     case TARGET_NR_setgid32:
11456         return get_errno(sys_setgid(arg1));
11457 #endif
11458 #ifdef TARGET_NR_setfsuid32
11459     case TARGET_NR_setfsuid32:
11460         return get_errno(setfsuid(arg1));
11461 #endif
11462 #ifdef TARGET_NR_setfsgid32
11463     case TARGET_NR_setfsgid32:
11464         return get_errno(setfsgid(arg1));
11465 #endif
11466 #ifdef TARGET_NR_mincore
11467     case TARGET_NR_mincore:
11468         {
11469             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11470             if (!a) {
11471                 return -TARGET_ENOMEM;
11472             }
11473             p = lock_user_string(arg3);
11474             if (!p) {
11475                 ret = -TARGET_EFAULT;
11476             } else {
11477                 ret = get_errno(mincore(a, arg2, p));
11478                 unlock_user(p, arg3, ret);
11479             }
11480             unlock_user(a, arg1, 0);
11481         }
11482         return ret;
11483 #endif
11484 #ifdef TARGET_NR_arm_fadvise64_64
11485     case TARGET_NR_arm_fadvise64_64:
11486         /* arm_fadvise64_64 looks like fadvise64_64 but
11487          * with different argument order: fd, advice, offset, len
11488          * rather than the usual fd, offset, len, advice.
11489          * Note that offset and len are both 64-bit so appear as
11490          * pairs of 32-bit registers.
11491          */
11492         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11493                             target_offset64(arg5, arg6), arg2);
11494         return -host_to_target_errno(ret);
11495 #endif
11496 
11497 #if TARGET_ABI_BITS == 32
11498 
11499 #ifdef TARGET_NR_fadvise64_64
11500     case TARGET_NR_fadvise64_64:
11501 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11502         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11503         ret = arg2;
11504         arg2 = arg3;
11505         arg3 = arg4;
11506         arg4 = arg5;
11507         arg5 = arg6;
11508         arg6 = ret;
11509 #else
11510         /* 6 args: fd, offset (high, low), len (high, low), advice */
11511         if (regpairs_aligned(cpu_env, num)) {
11512             /* offset is in (3,4), len in (5,6) and advice in 7 */
11513             arg2 = arg3;
11514             arg3 = arg4;
11515             arg4 = arg5;
11516             arg5 = arg6;
11517             arg6 = arg7;
11518         }
11519 #endif
11520         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11521                             target_offset64(arg4, arg5), arg6);
11522         return -host_to_target_errno(ret);
11523 #endif
11524 
11525 #ifdef TARGET_NR_fadvise64
11526     case TARGET_NR_fadvise64:
11527         /* 5 args: fd, offset (high, low), len, advice */
11528         if (regpairs_aligned(cpu_env, num)) {
11529             /* offset is in (3,4), len in 5 and advice in 6 */
11530             arg2 = arg3;
11531             arg3 = arg4;
11532             arg4 = arg5;
11533             arg5 = arg6;
11534         }
11535         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11536         return -host_to_target_errno(ret);
11537 #endif
11538 
11539 #else /* not a 32-bit ABI */
11540 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11541 #ifdef TARGET_NR_fadvise64_64
11542     case TARGET_NR_fadvise64_64:
11543 #endif
11544 #ifdef TARGET_NR_fadvise64
11545     case TARGET_NR_fadvise64:
11546 #endif
11547 #ifdef TARGET_S390X
11548         switch (arg4) {
11549         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11550         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11551         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11552         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11553         default: break;
11554         }
11555 #endif
11556         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11557 #endif
11558 #endif /* end of 64-bit ABI fadvise handling */
11559 
11560 #ifdef TARGET_NR_madvise
11561     case TARGET_NR_madvise:
11562         /* A straight passthrough may not be safe because qemu sometimes
11563            turns private file-backed mappings into anonymous mappings.
11564            This will break MADV_DONTNEED.
11565            This is a hint, so ignoring and returning success is ok.  */
11566         return 0;
11567 #endif
11568 #if TARGET_ABI_BITS == 32
11569     case TARGET_NR_fcntl64:
11570     {
11571         int cmd;
11572         struct flock64 fl;
11573         from_flock64_fn *copyfrom = copy_from_user_flock64;
11574         to_flock64_fn *copyto = copy_to_user_flock64;
11575 
11576 #ifdef TARGET_ARM
11577         if (!((CPUARMState *)cpu_env)->eabi) {
11578             copyfrom = copy_from_user_oabi_flock64;
11579             copyto = copy_to_user_oabi_flock64;
11580         }
11581 #endif
11582 
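        /*
         * The *LK64 commands carry a struct flock64 that must be converted
         * between guest and host layouts; ARM OABI guests lay the structure
         * out differently, hence the alternate copy helpers selected above.
         */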
11583         cmd = target_to_host_fcntl_cmd(arg2);
11584         if (cmd == -TARGET_EINVAL) {
11585             return cmd;
11586         }
11587 
11588         switch (arg2) {
11589         case TARGET_F_GETLK64:
11590             ret = copyfrom(&fl, arg3);
11591             if (ret) {
11592                 break;
11593             }
11594             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11595             if (ret == 0) {
11596                 ret = copyto(arg3, &fl);
11597             }
11598             break;
11599 
11600         case TARGET_F_SETLK64:
11601         case TARGET_F_SETLKW64:
11602             ret = copyfrom(&fl, arg3);
11603             if (ret) {
11604                 break;
11605             }
11606             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11607             break;
11608         default:
11609             ret = do_fcntl(arg1, arg2, arg3);
11610             break;
11611         }
11612         return ret;
11613     }
11614 #endif
11615 #ifdef TARGET_NR_cacheflush
11616     case TARGET_NR_cacheflush:
11617         /* self-modifying code is handled automatically, so nothing needed */
11618         return 0;
11619 #endif
11620 #ifdef TARGET_NR_getpagesize
11621     case TARGET_NR_getpagesize:
11622         return TARGET_PAGE_SIZE;
11623 #endif
11624     case TARGET_NR_gettid:
11625         return get_errno(gettid());
11626 #ifdef TARGET_NR_readahead
11627     case TARGET_NR_readahead:
11628 #if TARGET_ABI_BITS == 32
11629         if (regpairs_aligned(cpu_env, num)) {
11630             arg2 = arg3;
11631             arg3 = arg4;
11632             arg4 = arg5;
11633         }
11634         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11635 #else
11636         ret = get_errno(readahead(arg1, arg2, arg3));
11637 #endif
11638         return ret;
11639 #endif
11640 #ifdef CONFIG_ATTR
11641 #ifdef TARGET_NR_setxattr
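    /*
     * xattr names and values are opaque byte strings (list results are
     * NUL-separated name lists), so no byte swapping is needed: the
     * buffers only have to be locked into host-accessible memory.
     */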
11642     case TARGET_NR_listxattr:
11643     case TARGET_NR_llistxattr:
11644     {
11645         void *p, *b = 0;
11646         if (arg2) {
11647             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11648             if (!b) {
11649                 return -TARGET_EFAULT;
11650             }
11651         }
11652         p = lock_user_string(arg1);
11653         if (p) {
11654             if (num == TARGET_NR_listxattr) {
11655                 ret = get_errno(listxattr(p, b, arg3));
11656             } else {
11657                 ret = get_errno(llistxattr(p, b, arg3));
11658             }
11659         } else {
11660             ret = -TARGET_EFAULT;
11661         }
11662         unlock_user(p, arg1, 0);
11663         unlock_user(b, arg2, arg3);
11664         return ret;
11665     }
11666     case TARGET_NR_flistxattr:
11667     {
11668         void *b = 0;
11669         if (arg2) {
11670             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11671             if (!b) {
11672                 return -TARGET_EFAULT;
11673             }
11674         }
11675         ret = get_errno(flistxattr(arg1, b, arg3));
11676         unlock_user(b, arg2, arg3);
11677         return ret;
11678     }
11679     case TARGET_NR_setxattr:
11680     case TARGET_NR_lsetxattr:
11681         {
11682             void *p, *n, *v = 0;
11683             if (arg3) {
11684                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11685                 if (!v) {
11686                     return -TARGET_EFAULT;
11687                 }
11688             }
11689             p = lock_user_string(arg1);
11690             n = lock_user_string(arg2);
11691             if (p && n) {
11692                 if (num == TARGET_NR_setxattr) {
11693                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11694                 } else {
11695                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11696                 }
11697             } else {
11698                 ret = -TARGET_EFAULT;
11699             }
11700             unlock_user(p, arg1, 0);
11701             unlock_user(n, arg2, 0);
11702             unlock_user(v, arg3, 0);
11703         }
11704         return ret;
11705     case TARGET_NR_fsetxattr:
11706         {
11707             void *n, *v = 0;
11708             if (arg3) {
11709                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11710                 if (!v) {
11711                     return -TARGET_EFAULT;
11712                 }
11713             }
11714             n = lock_user_string(arg2);
11715             if (n) {
11716                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11717             } else {
11718                 ret = -TARGET_EFAULT;
11719             }
11720             unlock_user(n, arg2, 0);
11721             unlock_user(v, arg3, 0);
11722         }
11723         return ret;
11724     case TARGET_NR_getxattr:
11725     case TARGET_NR_lgetxattr:
11726         {
11727             void *p, *n, *v = 0;
11728             if (arg3) {
11729                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11730                 if (!v) {
11731                     return -TARGET_EFAULT;
11732                 }
11733             }
11734             p = lock_user_string(arg1);
11735             n = lock_user_string(arg2);
11736             if (p && n) {
11737                 if (num == TARGET_NR_getxattr) {
11738                     ret = get_errno(getxattr(p, n, v, arg4));
11739                 } else {
11740                     ret = get_errno(lgetxattr(p, n, v, arg4));
11741                 }
11742             } else {
11743                 ret = -TARGET_EFAULT;
11744             }
11745             unlock_user(p, arg1, 0);
11746             unlock_user(n, arg2, 0);
11747             unlock_user(v, arg3, arg4);
11748         }
11749         return ret;
11750     case TARGET_NR_fgetxattr:
11751         {
11752             void *n, *v = 0;
11753             if (arg3) {
11754                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11755                 if (!v) {
11756                     return -TARGET_EFAULT;
11757                 }
11758             }
11759             n = lock_user_string(arg2);
11760             if (n) {
11761                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11762             } else {
11763                 ret = -TARGET_EFAULT;
11764             }
11765             unlock_user(n, arg2, 0);
11766             unlock_user(v, arg3, arg4);
11767         }
11768         return ret;
11769     case TARGET_NR_removexattr:
11770     case TARGET_NR_lremovexattr:
11771         {
11772             void *p, *n;
11773             p = lock_user_string(arg1);
11774             n = lock_user_string(arg2);
11775             if (p && n) {
11776                 if (num == TARGET_NR_removexattr) {
11777                     ret = get_errno(removexattr(p, n));
11778                 } else {
11779                     ret = get_errno(lremovexattr(p, n));
11780                 }
11781             } else {
11782                 ret = -TARGET_EFAULT;
11783             }
11784             unlock_user(p, arg1, 0);
11785             unlock_user(n, arg2, 0);
11786         }
11787         return ret;
11788     case TARGET_NR_fremovexattr:
11789         {
11790             void *n;
11791             n = lock_user_string(arg2);
11792             if (n) {
11793                 ret = get_errno(fremovexattr(arg1, n));
11794             } else {
11795                 ret = -TARGET_EFAULT;
11796             }
11797             unlock_user(n, arg2, 0);
11798         }
11799         return ret;
11800 #endif
11801 #endif /* CONFIG_ATTR */
11802 #ifdef TARGET_NR_set_thread_area
11803     case TARGET_NR_set_thread_area:
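    /*
     * There is no generic TLS register: each target stores the pointer
     * wherever its ABI expects it (CP0 UserLocal on MIPS, PR_PID on CRIS,
     * a GDT entry on 32-bit x86, the TaskState on m68k).
     */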
11804 #if defined(TARGET_MIPS)
11805       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11806       return 0;
11807 #elif defined(TARGET_CRIS)
11808       if (arg1 & 0xff) {
11809           ret = -TARGET_EINVAL;
11810       } else {
11811           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11812           ret = 0;
11813       }
11814       return ret;
11815 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11816       return do_set_thread_area(cpu_env, arg1);
11817 #elif defined(TARGET_M68K)
11818       {
11819           TaskState *ts = cpu->opaque;
11820           ts->tp_value = arg1;
11821           return 0;
11822       }
11823 #else
11824       return -TARGET_ENOSYS;
11825 #endif
11826 #endif
11827 #ifdef TARGET_NR_get_thread_area
11828     case TARGET_NR_get_thread_area:
11829 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11830         return do_get_thread_area(cpu_env, arg1);
11831 #elif defined(TARGET_M68K)
11832         {
11833             TaskState *ts = cpu->opaque;
11834             return ts->tp_value;
11835         }
11836 #else
11837         return -TARGET_ENOSYS;
11838 #endif
11839 #endif
11840 #ifdef TARGET_NR_getdomainname
11841     case TARGET_NR_getdomainname:
11842         return -TARGET_ENOSYS;
11843 #endif
11844 
11845 #ifdef TARGET_NR_clock_settime
11846     case TARGET_NR_clock_settime:
11847     {
11848         struct timespec ts;
11849 
11850         ret = target_to_host_timespec(&ts, arg2);
11851         if (!is_error(ret)) {
11852             ret = get_errno(clock_settime(arg1, &ts));
11853         }
11854         return ret;
11855     }
11856 #endif
11857 #ifdef TARGET_NR_clock_gettime
11858     case TARGET_NR_clock_gettime:
11859     {
11860         struct timespec ts;
11861         ret = get_errno(clock_gettime(arg1, &ts));
11862         if (!is_error(ret)) {
11863             ret = host_to_target_timespec(arg2, &ts);
11864         }
11865         return ret;
11866     }
11867 #endif
11868 #ifdef TARGET_NR_clock_getres
11869     case TARGET_NR_clock_getres:
11870     {
11871         struct timespec ts;
11872         ret = get_errno(clock_getres(arg1, &ts));
11873         if (!is_error(ret)) {
11874             host_to_target_timespec(arg2, &ts);
11875         }
11876         return ret;
11877     }
11878 #endif
11879 #ifdef TARGET_NR_clock_nanosleep
11880     case TARGET_NR_clock_nanosleep:
11881     {
11882         struct timespec ts;
11883         target_to_host_timespec(&ts, arg3);
11884         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11885                                              &ts, arg4 ? &ts : NULL));
11886         if (arg4)
11887             host_to_target_timespec(arg4, &ts);
11888 
11889 #if defined(TARGET_PPC)
11890         /* clock_nanosleep is odd in that it returns positive errno values.
11891          * On PPC, CR0 bit 3 should be set in such a situation. */
11892         if (ret && ret != -TARGET_ERESTARTSYS) {
11893             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11894         }
11895 #endif
11896         return ret;
11897     }
11898 #endif
11899 
11900 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
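    /*
     * The kernel only records this address and writes/wakes it when the
     * thread exits, so passing the host view of the guest pointer via
     * g2h() is sufficient here.
     */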
11901     case TARGET_NR_set_tid_address:
11902         return get_errno(set_tid_address((int *)g2h(arg1)));
11903 #endif
11904 
11905     case TARGET_NR_tkill:
11906         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11907 
11908     case TARGET_NR_tgkill:
11909         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11910                          target_to_host_signal(arg3)));
11911 
11912 #ifdef TARGET_NR_set_robust_list
11913     case TARGET_NR_set_robust_list:
11914     case TARGET_NR_get_robust_list:
11915         /* The ABI for supporting robust futexes has userspace pass
11916          * the kernel a pointer to a linked list which is updated by
11917          * userspace after the syscall; the list is walked by the kernel
11918          * when the thread exits. Since the linked list in QEMU guest
11919          * memory isn't a valid linked list for the host and we have
11920          * no way to reliably intercept the thread-death event, we can't
11921          * support these. Silently return ENOSYS so that guest userspace
11922          * falls back to a non-robust futex implementation (which should
11923          * be OK except in the corner case of the guest crashing while
11924          * holding a mutex that is shared with another process via
11925          * shared memory).
11926          */
11927         return -TARGET_ENOSYS;
11928 #endif
11929 
11930 #if defined(TARGET_NR_utimensat)
11931     case TARGET_NR_utimensat:
11932         {
11933             struct timespec *tsp, ts[2];
11934             if (!arg3) {
11935                 tsp = NULL;
11936             } else {
11937                 target_to_host_timespec(ts, arg3);
11938                 target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
11939                 tsp = ts;
11940             }
11941             if (!arg2) {
11942                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11943             } else {
11944                 if (!(p = lock_user_string(arg2))) {
11945                     return -TARGET_EFAULT;
11946                 }
11947                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11948                 unlock_user(p, arg2, 0);
11949             }
11950         }
11951         return ret;
11952 #endif
11953     case TARGET_NR_futex:
11954         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11955 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11956     case TARGET_NR_inotify_init:
11957         ret = get_errno(sys_inotify_init());
11958         if (ret >= 0) {
11959             fd_trans_register(ret, &target_inotify_trans);
11960         }
11961         return ret;
11962 #endif
11963 #ifdef CONFIG_INOTIFY1
11964 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11965     case TARGET_NR_inotify_init1:
11966         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11967                                           fcntl_flags_tbl)));
11968         if (ret >= 0) {
11969             fd_trans_register(ret, &target_inotify_trans);
11970         }
11971         return ret;
11972 #endif
11973 #endif
11974 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11975     case TARGET_NR_inotify_add_watch:
11976         p = lock_user_string(arg2);
11977         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11978         unlock_user(p, arg2, 0);
11979         return ret;
11980 #endif
11981 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11982     case TARGET_NR_inotify_rm_watch:
11983         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11984 #endif
11985 
11986 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11987     case TARGET_NR_mq_open:
11988         {
11989             struct mq_attr posix_mq_attr;
11990             struct mq_attr *pposix_mq_attr;
11991             int host_flags;
11992 
11993             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11994             pposix_mq_attr = NULL;
11995             if (arg4) {
11996                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11997                     return -TARGET_EFAULT;
11998                 }
11999                 pposix_mq_attr = &posix_mq_attr;
12000             }
12001             p = lock_user_string(arg1 - 1);
12002             if (!p) {
12003                 return -TARGET_EFAULT;
12004             }
12005             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12006             unlock_user(p, arg1, 0);
12007         }
12008         return ret;
12009 
12010     case TARGET_NR_mq_unlink:
12011         p = lock_user_string(arg1 - 1);
12012         if (!p) {
12013             return -TARGET_EFAULT;
12014         }
12015         ret = get_errno(mq_unlink(p));
12016         unlock_user(p, arg1, 0);
12017         return ret;
12018 
12019     case TARGET_NR_mq_timedsend:
12020         {
12021             struct timespec ts;
12022 
12023             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12024             if (arg5 != 0) {
12025                 target_to_host_timespec(&ts, arg5);
12026                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12027                 host_to_target_timespec(arg5, &ts);
12028             } else {
12029                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12030             }
12031             unlock_user(p, arg2, arg3);
12032         }
12033         return ret;
12034 
12035     case TARGET_NR_mq_timedreceive:
12036         {
12037             struct timespec ts;
12038             unsigned int prio;
12039 
12040             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
12041             if (arg5 != 0) {
12042                 target_to_host_timespec(&ts, arg5);
12043                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12044                                                      &prio, &ts));
12045                 host_to_target_timespec(arg5, &ts);
12046             } else {
12047                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12048                                                      &prio, NULL));
12049             }
12050             unlock_user(p, arg2, arg3);
12051             if (arg4 != 0)
12052                 put_user_u32(prio, arg4);
12053         }
12054         return ret;
12055 
12056     /* Not implemented for now... */
12057 /*     case TARGET_NR_mq_notify: */
12058 /*         break; */
12059 
12060     case TARGET_NR_mq_getsetattr:
12061         {
12062             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12063             ret = 0;
12064             if (arg2 != 0) {
12065                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12066                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12067                                            &posix_mq_attr_out));
12068             } else if (arg3 != 0) {
12069                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12070             }
12071             if (ret == 0 && arg3 != 0) {
12072                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12073             }
12074         }
12075         return ret;
12076 #endif
12077 
12078 #ifdef CONFIG_SPLICE
12079 #ifdef TARGET_NR_tee
12080     case TARGET_NR_tee:
12081         {
12082             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12083         }
12084         return ret;
12085 #endif
12086 #ifdef TARGET_NR_splice
12087     case TARGET_NR_splice:
12088         {
12089             loff_t loff_in, loff_out;
12090             loff_t *ploff_in = NULL, *ploff_out = NULL;
12091             if (arg2) {
12092                 if (get_user_u64(loff_in, arg2)) {
12093                     return -TARGET_EFAULT;
12094                 }
12095                 ploff_in = &loff_in;
12096             }
12097             if (arg4) {
12098                 if (get_user_u64(loff_out, arg4)) {
12099                     return -TARGET_EFAULT;
12100                 }
12101                 ploff_out = &loff_out;
12102             }
12103             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12104             if (arg2) {
12105                 if (put_user_u64(loff_in, arg2)) {
12106                     return -TARGET_EFAULT;
12107                 }
12108             }
12109             if (arg4) {
12110                 if (put_user_u64(loff_out, arg4)) {
12111                     return -TARGET_EFAULT;
12112                 }
12113             }
12114         }
12115         return ret;
12116 #endif
12117 #ifdef TARGET_NR_vmsplice
12118     case TARGET_NR_vmsplice:
12119         {
12120             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12121             if (vec != NULL) {
12122                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12123                 unlock_iovec(vec, arg2, arg3, 0);
12124             } else {
12125                 ret = -host_to_target_errno(errno);
12126             }
12127         }
12128         return ret;
12129 #endif
12130 #endif /* CONFIG_SPLICE */
12131 #ifdef CONFIG_EVENTFD
12132 #if defined(TARGET_NR_eventfd)
12133     case TARGET_NR_eventfd:
12134         ret = get_errno(eventfd(arg1, 0));
12135         if (ret >= 0) {
12136             fd_trans_register(ret, &target_eventfd_trans);
12137         }
12138         return ret;
12139 #endif
12140 #if defined(TARGET_NR_eventfd2)
12141     case TARGET_NR_eventfd2:
12142     {
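        /*
         * EFD_NONBLOCK and EFD_CLOEXEC share the O_NONBLOCK/O_CLOEXEC
         * values, so translate those two flags from the target's encoding
         * and let any remaining bits pass through unchanged.
         */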
12143         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12144         if (arg2 & TARGET_O_NONBLOCK) {
12145             host_flags |= O_NONBLOCK;
12146         }
12147         if (arg2 & TARGET_O_CLOEXEC) {
12148             host_flags |= O_CLOEXEC;
12149         }
12150         ret = get_errno(eventfd(arg1, host_flags));
12151         if (ret >= 0) {
12152             fd_trans_register(ret, &target_eventfd_trans);
12153         }
12154         return ret;
12155     }
12156 #endif
12157 #endif /* CONFIG_EVENTFD  */
12158 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12159     case TARGET_NR_fallocate:
12160 #if TARGET_ABI_BITS == 32
12161         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12162                                   target_offset64(arg5, arg6)));
12163 #else
12164         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12165 #endif
12166         return ret;
12167 #endif
12168 #if defined(CONFIG_SYNC_FILE_RANGE)
12169 #if defined(TARGET_NR_sync_file_range)
12170     case TARGET_NR_sync_file_range:
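        /*
         * The two 64-bit arguments arrive as register pairs on 32-bit ABIs;
         * o32 MIPS also inserts an alignment pad after the fd, pushing
         * everything up one register so the flags end up in arg7.
         */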
12171 #if TARGET_ABI_BITS == 32
12172 #if defined(TARGET_MIPS)
12173         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12174                                         target_offset64(arg5, arg6), arg7));
12175 #else
12176         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12177                                         target_offset64(arg4, arg5), arg6));
12178 #endif /* !TARGET_MIPS */
12179 #else
12180         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12181 #endif
12182         return ret;
12183 #endif
12184 #if defined(TARGET_NR_sync_file_range2)
12185     case TARGET_NR_sync_file_range2:
12186         /* This is like sync_file_range but the arguments are reordered */
12187 #if TARGET_ABI_BITS == 32
12188         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12189                                         target_offset64(arg5, arg6), arg2));
12190 #else
12191         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12192 #endif
12193         return ret;
12194 #endif
12195 #endif
12196 #if defined(TARGET_NR_signalfd4)
12197     case TARGET_NR_signalfd4:
12198         return do_signalfd4(arg1, arg2, arg4);
12199 #endif
12200 #if defined(TARGET_NR_signalfd)
12201     case TARGET_NR_signalfd:
12202         return do_signalfd4(arg1, arg2, 0);
12203 #endif
12204 #if defined(CONFIG_EPOLL)
12205 #if defined(TARGET_NR_epoll_create)
12206     case TARGET_NR_epoll_create:
12207         return get_errno(epoll_create(arg1));
12208 #endif
12209 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12210     case TARGET_NR_epoll_create1:
12211         return get_errno(epoll_create1(arg1));
12212 #endif
12213 #if defined(TARGET_NR_epoll_ctl)
12214     case TARGET_NR_epoll_ctl:
12215     {
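        /*
         * arg4 may legitimately be 0 (e.g. for EPOLL_CTL_DEL); in that case
         * a NULL event pointer is passed through to the host.
         */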
12216         struct epoll_event ep;
12217         struct epoll_event *epp = NULL;
12218         if (arg4) {
12219             struct target_epoll_event *target_ep;
12220             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12221                 return -TARGET_EFAULT;
12222             }
12223             ep.events = tswap32(target_ep->events);
12224             /* The epoll_data_t union is just opaque data to the kernel,
12225              * so we transfer all 64 bits across and need not worry what
12226              * actual data type it is.
12227              */
12228             ep.data.u64 = tswap64(target_ep->data.u64);
12229             unlock_user_struct(target_ep, arg4, 0);
12230             epp = &ep;
12231         }
12232         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12233     }
12234 #endif
12235 
12236 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12237 #if defined(TARGET_NR_epoll_wait)
12238     case TARGET_NR_epoll_wait:
12239 #endif
12240 #if defined(TARGET_NR_epoll_pwait)
12241     case TARGET_NR_epoll_pwait:
12242 #endif
12243     {
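        /*
         * Shared implementation for both syscalls: plain epoll_wait is
         * treated as epoll_pwait with no signal mask.  Events are collected
         * in a temporary host buffer and byteswapped back into the guest
         * array on success.
         */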
12244         struct target_epoll_event *target_ep;
12245         struct epoll_event *ep;
12246         int epfd = arg1;
12247         int maxevents = arg3;
12248         int timeout = arg4;
12249 
12250         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12251             return -TARGET_EINVAL;
12252         }
12253 
12254         target_ep = lock_user(VERIFY_WRITE, arg2,
12255                               maxevents * sizeof(struct target_epoll_event), 1);
12256         if (!target_ep) {
12257             return -TARGET_EFAULT;
12258         }
12259 
12260         ep = g_try_new(struct epoll_event, maxevents);
12261         if (!ep) {
12262             unlock_user(target_ep, arg2, 0);
12263             return -TARGET_ENOMEM;
12264         }
12265 
12266         switch (num) {
12267 #if defined(TARGET_NR_epoll_pwait)
12268         case TARGET_NR_epoll_pwait:
12269         {
12270             target_sigset_t *target_set;
12271             sigset_t _set, *set = &_set;
12272 
12273             if (arg5) {
12274                 if (arg6 != sizeof(target_sigset_t)) {
12275                     ret = -TARGET_EINVAL;
12276                     break;
12277                 }
12278 
12279                 target_set = lock_user(VERIFY_READ, arg5,
12280                                        sizeof(target_sigset_t), 1);
12281                 if (!target_set) {
12282                     ret = -TARGET_EFAULT;
12283                     break;
12284                 }
12285                 target_to_host_sigset(set, target_set);
12286                 unlock_user(target_set, arg5, 0);
12287             } else {
12288                 set = NULL;
12289             }
12290 
12291             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12292                                              set, SIGSET_T_SIZE));
12293             break;
12294         }
12295 #endif
12296 #if defined(TARGET_NR_epoll_wait)
12297         case TARGET_NR_epoll_wait:
12298             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12299                                              NULL, 0));
12300             break;
12301 #endif
12302         default:
12303             ret = -TARGET_ENOSYS;
12304         }
12305         if (!is_error(ret)) {
12306             int i;
12307             for (i = 0; i < ret; i++) {
12308                 target_ep[i].events = tswap32(ep[i].events);
12309                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12310             }
12311             unlock_user(target_ep, arg2,
12312                         ret * sizeof(struct target_epoll_event));
12313         } else {
12314             unlock_user(target_ep, arg2, 0);
12315         }
12316         g_free(ep);
12317         return ret;
12318     }
12319 #endif
12320 #endif
12321 #ifdef TARGET_NR_prlimit64
12322     case TARGET_NR_prlimit64:
12323     {
12324         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
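        /* struct rlimit64 has fixed 64-bit fields on every ABI, so the
         * conversion in each direction is just a byteswap of rlim_cur and
         * rlim_max.
         */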
12325         struct target_rlimit64 *target_rnew, *target_rold;
12326         struct host_rlimit64 rnew, rold, *rnewp = 0;
12327         int resource = target_to_host_resource(arg2);
12328         if (arg3) {
12329             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12330                 return -TARGET_EFAULT;
12331             }
12332             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12333             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12334             unlock_user_struct(target_rnew, arg3, 0);
12335             rnewp = &rnew;
12336         }
12337 
12338         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12339         if (!is_error(ret) && arg4) {
12340             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12341                 return -TARGET_EFAULT;
12342             }
12343             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12344             target_rold->rlim_max = tswap64(rold.rlim_max);
12345             unlock_user_struct(target_rold, arg4, 1);
12346         }
12347         return ret;
12348     }
12349 #endif
12350 #ifdef TARGET_NR_gethostname
12351     case TARGET_NR_gethostname:
12352     {
12353         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12354         if (name) {
12355             ret = get_errno(gethostname(name, arg2));
12356             unlock_user(name, arg1, arg2);
12357         } else {
12358             ret = -TARGET_EFAULT;
12359         }
12360         return ret;
12361     }
12362 #endif
12363 #ifdef TARGET_NR_atomic_cmpxchg_32
12364     case TARGET_NR_atomic_cmpxchg_32:
12365     {
12366         /* FIXME: should use start_exclusive from main.c for real atomicity */
12367         abi_ulong mem_value;
12368         if (get_user_u32(mem_value, arg6)) {
12369             target_siginfo_t info;
12370             info.si_signo = SIGSEGV;
12371             info.si_errno = 0;
12372             info.si_code = TARGET_SEGV_MAPERR;
12373             info._sifields._sigfault._addr = arg6;
12374             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12375                          QEMU_SI_FAULT, &info);
12376             /* don't fall through and compare uninitialised mem_value */
12377             return 0xdeadbeef;
12378         }
12379         if (mem_value == arg2)
12380             put_user_u32(arg1, arg6);
12381         return mem_value;
12382     }
12383 #endif
12384 #ifdef TARGET_NR_atomic_barrier
12385     case TARGET_NR_atomic_barrier:
12386         /* Like the kernel implementation and the qemu arm barrier,
12387            treat this as a no-op. */
12388         return 0;
12389 #endif
12390 
12391 #ifdef TARGET_NR_timer_create
12392     case TARGET_NR_timer_create:
12393     {
12394         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12395 
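        /*
         * Host timers are kept in the g_posix_timers[] table; the guest is
         * handed back the table index tagged with TIMER_MAGIC rather than
         * the raw host timer_t, and the other timer_* syscalls map it back
         * with get_timer_id().
         */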
12396         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12397 
12398         int clkid = arg1;
12399         int timer_index = next_free_host_timer();
12400 
12401         if (timer_index < 0) {
12402             ret = -TARGET_EAGAIN;
12403         } else {
12404             timer_t *phtimer = g_posix_timers + timer_index;
12405 
12406             if (arg2) {
12407                 phost_sevp = &host_sevp;
12408                 ret = target_to_host_sigevent(phost_sevp, arg2);
12409                 if (ret != 0) {
12410                     return ret;
12411                 }
12412             }
12413 
12414             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12415             if (ret) {
12416                 phtimer = NULL;
12417             } else {
12418                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12419                     return -TARGET_EFAULT;
12420                 }
12421             }
12422         }
12423         return ret;
12424     }
12425 #endif
12426 
12427 #ifdef TARGET_NR_timer_settime
12428     case TARGET_NR_timer_settime:
12429     {
12430         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12431          * struct itimerspec *old_value */
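        /* get_timer_id() checks the TIMER_MAGIC tag and the table index,
         * returning a negative target errno for an invalid id.
         */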
12432         target_timer_t timerid = get_timer_id(arg1);
12433 
12434         if (timerid < 0) {
12435             ret = timerid;
12436         } else if (arg3 == 0) {
12437             ret = -TARGET_EINVAL;
12438         } else {
12439             timer_t htimer = g_posix_timers[timerid];
12440             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12441 
12442             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12443                 return -TARGET_EFAULT;
12444             }
12445             ret = get_errno(
12446                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12447             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12448                 return -TARGET_EFAULT;
12449             }
12450         }
12451         return ret;
12452     }
12453 #endif
12454 
12455 #ifdef TARGET_NR_timer_gettime
12456     case TARGET_NR_timer_gettime:
12457     {
12458         /* args: timer_t timerid, struct itimerspec *curr_value */
12459         target_timer_t timerid = get_timer_id(arg1);
12460 
12461         if (timerid < 0) {
12462             ret = timerid;
12463         } else if (!arg2) {
12464             ret = -TARGET_EFAULT;
12465         } else {
12466             timer_t htimer = g_posix_timers[timerid];
12467             struct itimerspec hspec;
12468             ret = get_errno(timer_gettime(htimer, &hspec));
12469 
12470             if (host_to_target_itimerspec(arg2, &hspec)) {
12471                 ret = -TARGET_EFAULT;
12472             }
12473         }
12474         return ret;
12475     }
12476 #endif
12477 
12478 #ifdef TARGET_NR_timer_getoverrun
12479     case TARGET_NR_timer_getoverrun:
12480     {
12481         /* args: timer_t timerid */
12482         target_timer_t timerid = get_timer_id(arg1);
12483 
12484         if (timerid < 0) {
12485             ret = timerid;
12486         } else {
12487             timer_t htimer = g_posix_timers[timerid];
12488             ret = get_errno(timer_getoverrun(htimer));
12489         }
12491         return ret;
12492     }
12493 #endif
12494 
12495 #ifdef TARGET_NR_timer_delete
12496     case TARGET_NR_timer_delete:
12497     {
12498         /* args: timer_t timerid */
12499         target_timer_t timerid = get_timer_id(arg1);
12500 
12501         if (timerid < 0) {
12502             ret = timerid;
12503         } else {
12504             timer_t htimer = g_posix_timers[timerid];
12505             ret = get_errno(timer_delete(htimer));
12506             g_posix_timers[timerid] = 0;
12507         }
12508         return ret;
12509     }
12510 #endif
12511 
12512 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12513     case TARGET_NR_timerfd_create:
12514         return get_errno(timerfd_create(arg1,
12515                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12516 #endif
12517 
12518 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12519     case TARGET_NR_timerfd_gettime:
12520         {
12521             struct itimerspec its_curr;
12522 
12523             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12524 
12525             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12526                 return -TARGET_EFAULT;
12527             }
12528         }
12529         return ret;
12530 #endif
12531 
12532 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12533     case TARGET_NR_timerfd_settime:
12534         {
12535             struct itimerspec its_new, its_old, *p_new;
12536 
12537             if (arg3) {
12538                 if (target_to_host_itimerspec(&its_new, arg3)) {
12539                     return -TARGET_EFAULT;
12540                 }
12541                 p_new = &its_new;
12542             } else {
12543                 p_new = NULL;
12544             }
12545 
12546             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12547 
12548             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12549                 return -TARGET_EFAULT;
12550             }
12551         }
12552         return ret;
12553 #endif
12554 
12555 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12556     case TARGET_NR_ioprio_get:
12557         return get_errno(ioprio_get(arg1, arg2));
12558 #endif
12559 
12560 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12561     case TARGET_NR_ioprio_set:
12562         return get_errno(ioprio_set(arg1, arg2, arg3));
12563 #endif
12564 
12565 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12566     case TARGET_NR_setns:
12567         return get_errno(setns(arg1, arg2));
12568 #endif
12569 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12570     case TARGET_NR_unshare:
12571         return get_errno(unshare(arg1));
12572 #endif
12573 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12574     case TARGET_NR_kcmp:
12575         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12576 #endif
12577 #ifdef TARGET_NR_swapcontext
12578     case TARGET_NR_swapcontext:
12579         /* PowerPC specific.  */
12580         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12581 #endif
12582 
12583     default:
12584         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12585         return -TARGET_ENOSYS;
12586     }
12587     return ret;
12588 }
12589 
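/* Main syscall entry point, called from the per-architecture cpu loops:
 * wraps do_syscall1() with tracepoints and optional strace-style logging
 * of the arguments and return value.
 */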
12590 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12591                     abi_long arg2, abi_long arg3, abi_long arg4,
12592                     abi_long arg5, abi_long arg6, abi_long arg7,
12593                     abi_long arg8)
12594 {
12595     CPUState *cpu = ENV_GET_CPU(cpu_env);
12596     abi_long ret;
12597 
12598 #ifdef DEBUG_ERESTARTSYS
12599     /* Debug-only code for exercising the syscall-restart code paths
12600      * in the per-architecture cpu main loops: restart every syscall
12601      * the guest makes once before letting it through.
12602      */
12603     {
12604         static bool flag;
12605         flag = !flag;
12606         if (flag) {
12607             return -TARGET_ERESTARTSYS;
12608         }
12609     }
12610 #endif
12611 
12612     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
12613                              arg5, arg6, arg7, arg8);
12614 
12615     if (unlikely(do_strace)) {
12616         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12617         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12618                           arg5, arg6, arg7, arg8);
12619         print_syscall_ret(num, ret);
12620     } else {
12621         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12622                           arg5, arg6, arg7, arg8);
12623     }
12624 
12625     trace_guest_user_syscall_ret(cpu, num, ret);
12626     return ret;
12627 }
12628