xref: /openbmc/qemu/linux-user/syscall.c (revision 5de154e82f4e507084f6c12b4ee300221ce078ce)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
165 
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168  * once. This exercises the codepaths for restart.
169  */
170 //#define DEBUG_ERESTARTSYS
171 
172 //#include <linux/msdos_fs.h>
173 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
174 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
175 
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
183 
184 #define _syscall0(type,name)		\
185 static type name (void)			\
186 {					\
187 	return syscall(__NR_##name);	\
188 }
189 
190 #define _syscall1(type,name,type1,arg1)		\
191 static type name (type1 arg1)			\
192 {						\
193 	return syscall(__NR_##name, arg1);	\
194 }
195 
196 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
197 static type name (type1 arg1,type2 arg2)		\
198 {							\
199 	return syscall(__NR_##name, arg1, arg2);	\
200 }
201 
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
203 static type name (type1 arg1,type2 arg2,type3 arg3)		\
204 {								\
205 	return syscall(__NR_##name, arg1, arg2, arg3);		\
206 }
207 
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
210 {										\
211 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
212 }
213 
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
215 		  type5,arg5)							\
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
217 {										\
218 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
219 }
220 
221 
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
223 		  type5,arg5,type6,arg6)					\
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
225                   type6 arg6)							\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
228 }
229 
230 
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
247 
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
252 
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257    errno. */
258 static int gettid(void) {
259     return -ENOSYS;
260 }
261 #endif
262 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
264 #endif
265 #if !defined(__NR_getdents) || \
266     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
267 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
268 #endif
269 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
270 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
271           loff_t *, res, uint, wh);
272 #endif
273 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
274 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
275           siginfo_t *, uinfo)
276 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group,int,error_code)
279 #endif
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address,int *,tidptr)
282 #endif
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
285           const struct timespec *,timeout,int *,uaddr2,int,val3)
286 #endif
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
289           unsigned long *, user_mask_ptr);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
292           unsigned long *, user_mask_ptr);
293 #define __NR_sys_getcpu __NR_getcpu
294 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
295 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
296           void *, arg);
297 _syscall2(int, capget, struct __user_cap_header_struct *, header,
298           struct __user_cap_data_struct *, data);
299 _syscall2(int, capset, struct __user_cap_header_struct *, header,
300           struct __user_cap_data_struct *, data);
301 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
302 _syscall2(int, ioprio_get, int, which, int, who)
303 #endif
304 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
305 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
306 #endif
307 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
308 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
309 #endif
310 
311 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
312 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
313           unsigned long, idx1, unsigned long, idx2)
314 #endif
315 
316 static bitmask_transtbl fcntl_flags_tbl[] = {
317   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
318   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
319   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
320   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
321   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
322   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
323   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
324   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
325   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
326   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
327   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
328   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
329   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
330 #if defined(O_DIRECT)
331   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
332 #endif
333 #if defined(O_NOATIME)
334   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
335 #endif
336 #if defined(O_CLOEXEC)
337   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
338 #endif
339 #if defined(O_PATH)
340   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
341 #endif
342 #if defined(O_TMPFILE)
343   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
344 #endif
345   /* Don't terminate the list prematurely on 64-bit host+guest.  */
346 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
347   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
348 #endif
349   { 0, 0, 0, 0 }
350 };
351 
352 enum {
353     QEMU_IFLA_BR_UNSPEC,
354     QEMU_IFLA_BR_FORWARD_DELAY,
355     QEMU_IFLA_BR_HELLO_TIME,
356     QEMU_IFLA_BR_MAX_AGE,
357     QEMU_IFLA_BR_AGEING_TIME,
358     QEMU_IFLA_BR_STP_STATE,
359     QEMU_IFLA_BR_PRIORITY,
360     QEMU_IFLA_BR_VLAN_FILTERING,
361     QEMU_IFLA_BR_VLAN_PROTOCOL,
362     QEMU_IFLA_BR_GROUP_FWD_MASK,
363     QEMU_IFLA_BR_ROOT_ID,
364     QEMU_IFLA_BR_BRIDGE_ID,
365     QEMU_IFLA_BR_ROOT_PORT,
366     QEMU_IFLA_BR_ROOT_PATH_COST,
367     QEMU_IFLA_BR_TOPOLOGY_CHANGE,
368     QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
369     QEMU_IFLA_BR_HELLO_TIMER,
370     QEMU_IFLA_BR_TCN_TIMER,
371     QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
372     QEMU_IFLA_BR_GC_TIMER,
373     QEMU_IFLA_BR_GROUP_ADDR,
374     QEMU_IFLA_BR_FDB_FLUSH,
375     QEMU_IFLA_BR_MCAST_ROUTER,
376     QEMU_IFLA_BR_MCAST_SNOOPING,
377     QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
378     QEMU_IFLA_BR_MCAST_QUERIER,
379     QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
380     QEMU_IFLA_BR_MCAST_HASH_MAX,
381     QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
382     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
383     QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
384     QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
385     QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
386     QEMU_IFLA_BR_MCAST_QUERY_INTVL,
387     QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
388     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
389     QEMU_IFLA_BR_NF_CALL_IPTABLES,
390     QEMU_IFLA_BR_NF_CALL_IP6TABLES,
391     QEMU_IFLA_BR_NF_CALL_ARPTABLES,
392     QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
393     QEMU_IFLA_BR_PAD,
394     QEMU_IFLA_BR_VLAN_STATS_ENABLED,
395     QEMU_IFLA_BR_MCAST_STATS_ENABLED,
396     QEMU___IFLA_BR_MAX,
397 };
398 
399 enum {
400     QEMU_IFLA_UNSPEC,
401     QEMU_IFLA_ADDRESS,
402     QEMU_IFLA_BROADCAST,
403     QEMU_IFLA_IFNAME,
404     QEMU_IFLA_MTU,
405     QEMU_IFLA_LINK,
406     QEMU_IFLA_QDISC,
407     QEMU_IFLA_STATS,
408     QEMU_IFLA_COST,
409     QEMU_IFLA_PRIORITY,
410     QEMU_IFLA_MASTER,
411     QEMU_IFLA_WIRELESS,
412     QEMU_IFLA_PROTINFO,
413     QEMU_IFLA_TXQLEN,
414     QEMU_IFLA_MAP,
415     QEMU_IFLA_WEIGHT,
416     QEMU_IFLA_OPERSTATE,
417     QEMU_IFLA_LINKMODE,
418     QEMU_IFLA_LINKINFO,
419     QEMU_IFLA_NET_NS_PID,
420     QEMU_IFLA_IFALIAS,
421     QEMU_IFLA_NUM_VF,
422     QEMU_IFLA_VFINFO_LIST,
423     QEMU_IFLA_STATS64,
424     QEMU_IFLA_VF_PORTS,
425     QEMU_IFLA_PORT_SELF,
426     QEMU_IFLA_AF_SPEC,
427     QEMU_IFLA_GROUP,
428     QEMU_IFLA_NET_NS_FD,
429     QEMU_IFLA_EXT_MASK,
430     QEMU_IFLA_PROMISCUITY,
431     QEMU_IFLA_NUM_TX_QUEUES,
432     QEMU_IFLA_NUM_RX_QUEUES,
433     QEMU_IFLA_CARRIER,
434     QEMU_IFLA_PHYS_PORT_ID,
435     QEMU_IFLA_CARRIER_CHANGES,
436     QEMU_IFLA_PHYS_SWITCH_ID,
437     QEMU_IFLA_LINK_NETNSID,
438     QEMU_IFLA_PHYS_PORT_NAME,
439     QEMU_IFLA_PROTO_DOWN,
440     QEMU_IFLA_GSO_MAX_SEGS,
441     QEMU_IFLA_GSO_MAX_SIZE,
442     QEMU_IFLA_PAD,
443     QEMU_IFLA_XDP,
444     QEMU___IFLA_MAX
445 };
446 
447 enum {
448     QEMU_IFLA_BRPORT_UNSPEC,
449     QEMU_IFLA_BRPORT_STATE,
450     QEMU_IFLA_BRPORT_PRIORITY,
451     QEMU_IFLA_BRPORT_COST,
452     QEMU_IFLA_BRPORT_MODE,
453     QEMU_IFLA_BRPORT_GUARD,
454     QEMU_IFLA_BRPORT_PROTECT,
455     QEMU_IFLA_BRPORT_FAST_LEAVE,
456     QEMU_IFLA_BRPORT_LEARNING,
457     QEMU_IFLA_BRPORT_UNICAST_FLOOD,
458     QEMU_IFLA_BRPORT_PROXYARP,
459     QEMU_IFLA_BRPORT_LEARNING_SYNC,
460     QEMU_IFLA_BRPORT_PROXYARP_WIFI,
461     QEMU_IFLA_BRPORT_ROOT_ID,
462     QEMU_IFLA_BRPORT_BRIDGE_ID,
463     QEMU_IFLA_BRPORT_DESIGNATED_PORT,
464     QEMU_IFLA_BRPORT_DESIGNATED_COST,
465     QEMU_IFLA_BRPORT_ID,
466     QEMU_IFLA_BRPORT_NO,
467     QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
468     QEMU_IFLA_BRPORT_CONFIG_PENDING,
469     QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
470     QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
471     QEMU_IFLA_BRPORT_HOLD_TIMER,
472     QEMU_IFLA_BRPORT_FLUSH,
473     QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
474     QEMU_IFLA_BRPORT_PAD,
475     QEMU___IFLA_BRPORT_MAX
476 };
477 
478 enum {
479     QEMU_IFLA_INFO_UNSPEC,
480     QEMU_IFLA_INFO_KIND,
481     QEMU_IFLA_INFO_DATA,
482     QEMU_IFLA_INFO_XSTATS,
483     QEMU_IFLA_INFO_SLAVE_KIND,
484     QEMU_IFLA_INFO_SLAVE_DATA,
485     QEMU___IFLA_INFO_MAX,
486 };
487 
488 enum {
489     QEMU_IFLA_INET_UNSPEC,
490     QEMU_IFLA_INET_CONF,
491     QEMU___IFLA_INET_MAX,
492 };
493 
494 enum {
495     QEMU_IFLA_INET6_UNSPEC,
496     QEMU_IFLA_INET6_FLAGS,
497     QEMU_IFLA_INET6_CONF,
498     QEMU_IFLA_INET6_STATS,
499     QEMU_IFLA_INET6_MCAST,
500     QEMU_IFLA_INET6_CACHEINFO,
501     QEMU_IFLA_INET6_ICMP6STATS,
502     QEMU_IFLA_INET6_TOKEN,
503     QEMU_IFLA_INET6_ADDR_GEN_MODE,
504     QEMU___IFLA_INET6_MAX
505 };
506 
507 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
508 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
509 typedef struct TargetFdTrans {
510     TargetFdDataFunc host_to_target_data;
511     TargetFdDataFunc target_to_host_data;
512     TargetFdAddrFunc target_to_host_addr;
513 } TargetFdTrans;
514 
515 static TargetFdTrans **target_fd_trans;
516 
517 static unsigned int target_fd_max;
518 
519 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
520 {
521     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
522         return target_fd_trans[fd]->target_to_host_data;
523     }
524     return NULL;
525 }
526 
527 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
528 {
529     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
530         return target_fd_trans[fd]->host_to_target_data;
531     }
532     return NULL;
533 }
534 
535 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
536 {
537     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
538         return target_fd_trans[fd]->target_to_host_addr;
539     }
540     return NULL;
541 }
542 
543 static void fd_trans_register(int fd, TargetFdTrans *trans)
544 {
545     unsigned int oldmax;
546 
547     if (fd >= target_fd_max) {
548         oldmax = target_fd_max;
549         target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
550         target_fd_trans = g_renew(TargetFdTrans *,
551                                   target_fd_trans, target_fd_max);
552         memset((void *)(target_fd_trans + oldmax), 0,
553                (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
554     }
555     target_fd_trans[fd] = trans;
556 }
557 
558 static void fd_trans_unregister(int fd)
559 {
560     if (fd >= 0 && fd < target_fd_max) {
561         target_fd_trans[fd] = NULL;
562     }
563 }
564 
565 static void fd_trans_dup(int oldfd, int newfd)
566 {
567     fd_trans_unregister(newfd);
568     if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
569         fd_trans_register(newfd, target_fd_trans[oldfd]);
570     }
571 }
572 
573 static int sys_getcwd1(char *buf, size_t size)
574 {
575   if (getcwd(buf, size) == NULL) {
576       /* getcwd() sets errno */
577       return (-1);
578   }
579   return strlen(buf)+1;
580 }
581 
582 #ifdef TARGET_NR_utimensat
583 #if defined(__NR_utimensat)
584 #define __NR_sys_utimensat __NR_utimensat
585 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
586           const struct timespec *,tsp,int,flags)
587 #else
588 static int sys_utimensat(int dirfd, const char *pathname,
589                          const struct timespec times[2], int flags)
590 {
591     errno = ENOSYS;
592     return -1;
593 }
594 #endif
595 #endif /* TARGET_NR_utimensat */
596 
597 #ifdef TARGET_NR_renameat2
598 #if defined(__NR_renameat2)
599 #define __NR_sys_renameat2 __NR_renameat2
600 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
601           const char *, new, unsigned int, flags)
602 #else
603 static int sys_renameat2(int oldfd, const char *old,
604                          int newfd, const char *new, int flags)
605 {
606     if (flags == 0) {
607         return renameat(oldfd, old, newfd, new);
608     }
609     errno = ENOSYS;
610     return -1;
611 }
612 #endif
613 #endif /* TARGET_NR_renameat2 */
614 
615 #ifdef CONFIG_INOTIFY
616 #include <sys/inotify.h>
617 
618 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
619 static int sys_inotify_init(void)
620 {
621   return (inotify_init());
622 }
623 #endif
624 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
625 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
626 {
627   return (inotify_add_watch(fd, pathname, mask));
628 }
629 #endif
630 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
631 static int sys_inotify_rm_watch(int fd, int32_t wd)
632 {
633   return (inotify_rm_watch(fd, wd));
634 }
635 #endif
636 #ifdef CONFIG_INOTIFY1
637 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
638 static int sys_inotify_init1(int flags)
639 {
640   return (inotify_init1(flags));
641 }
642 #endif
643 #endif
644 #else
645 /* Userspace can usually survive runtime without inotify */
646 #undef TARGET_NR_inotify_init
647 #undef TARGET_NR_inotify_init1
648 #undef TARGET_NR_inotify_add_watch
649 #undef TARGET_NR_inotify_rm_watch
650 #endif /* CONFIG_INOTIFY  */
651 
652 #if defined(TARGET_NR_prlimit64)
653 #ifndef __NR_prlimit64
654 # define __NR_prlimit64 -1
655 #endif
656 #define __NR_sys_prlimit64 __NR_prlimit64
657 /* The glibc rlimit structure may not be that used by the underlying syscall */
658 struct host_rlimit64 {
659     uint64_t rlim_cur;
660     uint64_t rlim_max;
661 };
662 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
663           const struct host_rlimit64 *, new_limit,
664           struct host_rlimit64 *, old_limit)
665 #endif
666 
667 
668 #if defined(TARGET_NR_timer_create)
669 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
670 static timer_t g_posix_timers[32] = { 0, } ;
671 
672 static inline int next_free_host_timer(void)
673 {
674     int k ;
675     /* FIXME: Does finding the next free slot require a lock? */
676     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
677         if (g_posix_timers[k] == 0) {
678             g_posix_timers[k] = (timer_t) 1;
679             return k;
680         }
681     }
682     return -1;
683 }
684 #endif
685 
686 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
687 #ifdef TARGET_ARM
688 static inline int regpairs_aligned(void *cpu_env, int num)
689 {
690     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
691 }
692 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
693 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
694 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
695 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
696  * of registers which translates to the same as ARM/MIPS, because we start with
697  * r3 as arg1 */
698 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
699 #elif defined(TARGET_SH4)
700 /* SH4 doesn't align register pairs, except for p{read,write}64 */
701 static inline int regpairs_aligned(void *cpu_env, int num)
702 {
703     switch (num) {
704     case TARGET_NR_pread64:
705     case TARGET_NR_pwrite64:
706         return 1;
707 
708     default:
709         return 0;
710     }
711 }
712 #elif defined(TARGET_XTENSA)
713 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
714 #else
715 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
716 #endif
717 
718 #define ERRNO_TABLE_SIZE 1200
719 
720 /* target_to_host_errno_table[] is initialized from
721  * host_to_target_errno_table[] in syscall_init(). */
722 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
723 };
724 
725 /*
726  * This list is the union of errno values overridden in asm-<arch>/errno.h
727  * minus the errnos that are not actually generic to all archs.
728  */
729 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
730     [EAGAIN]		= TARGET_EAGAIN,
731     [EIDRM]		= TARGET_EIDRM,
732     [ECHRNG]		= TARGET_ECHRNG,
733     [EL2NSYNC]		= TARGET_EL2NSYNC,
734     [EL3HLT]		= TARGET_EL3HLT,
735     [EL3RST]		= TARGET_EL3RST,
736     [ELNRNG]		= TARGET_ELNRNG,
737     [EUNATCH]		= TARGET_EUNATCH,
738     [ENOCSI]		= TARGET_ENOCSI,
739     [EL2HLT]		= TARGET_EL2HLT,
740     [EDEADLK]		= TARGET_EDEADLK,
741     [ENOLCK]		= TARGET_ENOLCK,
742     [EBADE]		= TARGET_EBADE,
743     [EBADR]		= TARGET_EBADR,
744     [EXFULL]		= TARGET_EXFULL,
745     [ENOANO]		= TARGET_ENOANO,
746     [EBADRQC]		= TARGET_EBADRQC,
747     [EBADSLT]		= TARGET_EBADSLT,
748     [EBFONT]		= TARGET_EBFONT,
749     [ENOSTR]		= TARGET_ENOSTR,
750     [ENODATA]		= TARGET_ENODATA,
751     [ETIME]		= TARGET_ETIME,
752     [ENOSR]		= TARGET_ENOSR,
753     [ENONET]		= TARGET_ENONET,
754     [ENOPKG]		= TARGET_ENOPKG,
755     [EREMOTE]		= TARGET_EREMOTE,
756     [ENOLINK]		= TARGET_ENOLINK,
757     [EADV]		= TARGET_EADV,
758     [ESRMNT]		= TARGET_ESRMNT,
759     [ECOMM]		= TARGET_ECOMM,
760     [EPROTO]		= TARGET_EPROTO,
761     [EDOTDOT]		= TARGET_EDOTDOT,
762     [EMULTIHOP]		= TARGET_EMULTIHOP,
763     [EBADMSG]		= TARGET_EBADMSG,
764     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
765     [EOVERFLOW]		= TARGET_EOVERFLOW,
766     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
767     [EBADFD]		= TARGET_EBADFD,
768     [EREMCHG]		= TARGET_EREMCHG,
769     [ELIBACC]		= TARGET_ELIBACC,
770     [ELIBBAD]		= TARGET_ELIBBAD,
771     [ELIBSCN]		= TARGET_ELIBSCN,
772     [ELIBMAX]		= TARGET_ELIBMAX,
773     [ELIBEXEC]		= TARGET_ELIBEXEC,
774     [EILSEQ]		= TARGET_EILSEQ,
775     [ENOSYS]		= TARGET_ENOSYS,
776     [ELOOP]		= TARGET_ELOOP,
777     [ERESTART]		= TARGET_ERESTART,
778     [ESTRPIPE]		= TARGET_ESTRPIPE,
779     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
780     [EUSERS]		= TARGET_EUSERS,
781     [ENOTSOCK]		= TARGET_ENOTSOCK,
782     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
783     [EMSGSIZE]		= TARGET_EMSGSIZE,
784     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
785     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
786     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
787     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
788     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
789     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
790     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
791     [EADDRINUSE]	= TARGET_EADDRINUSE,
792     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
793     [ENETDOWN]		= TARGET_ENETDOWN,
794     [ENETUNREACH]	= TARGET_ENETUNREACH,
795     [ENETRESET]		= TARGET_ENETRESET,
796     [ECONNABORTED]	= TARGET_ECONNABORTED,
797     [ECONNRESET]	= TARGET_ECONNRESET,
798     [ENOBUFS]		= TARGET_ENOBUFS,
799     [EISCONN]		= TARGET_EISCONN,
800     [ENOTCONN]		= TARGET_ENOTCONN,
801     [EUCLEAN]		= TARGET_EUCLEAN,
802     [ENOTNAM]		= TARGET_ENOTNAM,
803     [ENAVAIL]		= TARGET_ENAVAIL,
804     [EISNAM]		= TARGET_EISNAM,
805     [EREMOTEIO]		= TARGET_EREMOTEIO,
806     [EDQUOT]            = TARGET_EDQUOT,
807     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
808     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
809     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
810     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
811     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
812     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
813     [EALREADY]		= TARGET_EALREADY,
814     [EINPROGRESS]	= TARGET_EINPROGRESS,
815     [ESTALE]		= TARGET_ESTALE,
816     [ECANCELED]		= TARGET_ECANCELED,
817     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
818     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
819 #ifdef ENOKEY
820     [ENOKEY]		= TARGET_ENOKEY,
821 #endif
822 #ifdef EKEYEXPIRED
823     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
824 #endif
825 #ifdef EKEYREVOKED
826     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
827 #endif
828 #ifdef EKEYREJECTED
829     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
830 #endif
831 #ifdef EOWNERDEAD
832     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
833 #endif
834 #ifdef ENOTRECOVERABLE
835     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
836 #endif
837 #ifdef ENOMSG
838     [ENOMSG]            = TARGET_ENOMSG,
839 #endif
840 #ifdef ERKFILL
841     [ERFKILL]           = TARGET_ERFKILL,
842 #endif
843 #ifdef EHWPOISON
844     [EHWPOISON]         = TARGET_EHWPOISON,
845 #endif
846 };
847 
848 static inline int host_to_target_errno(int err)
849 {
850     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
851         host_to_target_errno_table[err]) {
852         return host_to_target_errno_table[err];
853     }
854     return err;
855 }
856 
857 static inline int target_to_host_errno(int err)
858 {
859     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
860         target_to_host_errno_table[err]) {
861         return target_to_host_errno_table[err];
862     }
863     return err;
864 }
865 
866 static inline abi_long get_errno(abi_long ret)
867 {
868     if (ret == -1)
869         return -host_to_target_errno(errno);
870     else
871         return ret;
872 }
873 
874 static inline int is_error(abi_long ret)
875 {
876     return (abi_ulong)ret >= (abi_ulong)(-4096);
877 }
878 
879 const char *target_strerror(int err)
880 {
881     if (err == TARGET_ERESTARTSYS) {
882         return "To be restarted";
883     }
884     if (err == TARGET_QEMU_ESIGRETURN) {
885         return "Successful exit from sigreturn";
886     }
887 
888     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
889         return NULL;
890     }
891     return strerror(target_to_host_errno(err));
892 }
893 
894 #define safe_syscall0(type, name) \
895 static type safe_##name(void) \
896 { \
897     return safe_syscall(__NR_##name); \
898 }
899 
900 #define safe_syscall1(type, name, type1, arg1) \
901 static type safe_##name(type1 arg1) \
902 { \
903     return safe_syscall(__NR_##name, arg1); \
904 }
905 
906 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
907 static type safe_##name(type1 arg1, type2 arg2) \
908 { \
909     return safe_syscall(__NR_##name, arg1, arg2); \
910 }
911 
912 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
913 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
914 { \
915     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
916 }
917 
918 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
919     type4, arg4) \
920 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
921 { \
922     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
923 }
924 
925 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
926     type4, arg4, type5, arg5) \
927 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
928     type5 arg5) \
929 { \
930     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
931 }
932 
933 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
934     type4, arg4, type5, arg5, type6, arg6) \
935 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
936     type5 arg5, type6 arg6) \
937 { \
938     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
939 }
940 
941 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
942 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
943 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
944               int, flags, mode_t, mode)
945 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
946               struct rusage *, rusage)
947 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
948               int, options, struct rusage *, rusage)
949 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
950 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
951               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
952 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
953               struct timespec *, tsp, const sigset_t *, sigmask,
954               size_t, sigsetsize)
955 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
956               int, maxevents, int, timeout, const sigset_t *, sigmask,
957               size_t, sigsetsize)
958 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
959               const struct timespec *,timeout,int *,uaddr2,int,val3)
960 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
961 safe_syscall2(int, kill, pid_t, pid, int, sig)
962 safe_syscall2(int, tkill, int, tid, int, sig)
963 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
964 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
965 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
966 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
967               unsigned long, pos_l, unsigned long, pos_h)
968 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
969               unsigned long, pos_l, unsigned long, pos_h)
970 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
971               socklen_t, addrlen)
972 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
973               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
974 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
975               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
976 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
977 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
978 safe_syscall2(int, flock, int, fd, int, operation)
979 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
980               const struct timespec *, uts, size_t, sigsetsize)
981 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
982               int, flags)
983 safe_syscall2(int, nanosleep, const struct timespec *, req,
984               struct timespec *, rem)
985 #ifdef TARGET_NR_clock_nanosleep
986 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
987               const struct timespec *, req, struct timespec *, rem)
988 #endif
989 #ifdef __NR_msgsnd
990 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
991               int, flags)
992 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
993               long, msgtype, int, flags)
994 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
995               unsigned, nsops, const struct timespec *, timeout)
996 #else
997 /* This host kernel architecture uses a single ipc syscall; fake up
998  * wrappers for the sub-operations to hide this implementation detail.
999  * Annoyingly we can't include linux/ipc.h to get the constant definitions
1000  * for the call parameter because some structs in there conflict with the
1001  * sys/ipc.h ones. So we just define them here, and rely on them being
1002  * the same for all host architectures.
1003  */
1004 #define Q_SEMTIMEDOP 4
1005 #define Q_MSGSND 11
1006 #define Q_MSGRCV 12
1007 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1008 
1009 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1010               void *, ptr, long, fifth)
1011 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1012 {
1013     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1014 }
1015 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1016 {
1017     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1018 }
1019 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1020                            const struct timespec *timeout)
1021 {
1022     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1023                     (long)timeout);
1024 }
1025 #endif
1026 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1027 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1028               size_t, len, unsigned, prio, const struct timespec *, timeout)
1029 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1030               size_t, len, unsigned *, prio, const struct timespec *, timeout)
1031 #endif
1032 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1033  * "third argument might be integer or pointer or not present" behaviour of
1034  * the libc function.
1035  */
1036 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1037 /* Similarly for fcntl. Note that callers must always:
1038  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1039  *  use the flock64 struct rather than unsuffixed flock
1040  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1041  */
1042 #ifdef __NR_fcntl64
1043 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1044 #else
1045 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1046 #endif
1047 
1048 static inline int host_to_target_sock_type(int host_type)
1049 {
1050     int target_type;
1051 
1052     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1053     case SOCK_DGRAM:
1054         target_type = TARGET_SOCK_DGRAM;
1055         break;
1056     case SOCK_STREAM:
1057         target_type = TARGET_SOCK_STREAM;
1058         break;
1059     default:
1060         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1061         break;
1062     }
1063 
1064 #if defined(SOCK_CLOEXEC)
1065     if (host_type & SOCK_CLOEXEC) {
1066         target_type |= TARGET_SOCK_CLOEXEC;
1067     }
1068 #endif
1069 
1070 #if defined(SOCK_NONBLOCK)
1071     if (host_type & SOCK_NONBLOCK) {
1072         target_type |= TARGET_SOCK_NONBLOCK;
1073     }
1074 #endif
1075 
1076     return target_type;
1077 }
1078 
1079 static abi_ulong target_brk;
1080 static abi_ulong target_original_brk;
1081 static abi_ulong brk_page;
1082 
1083 void target_set_brk(abi_ulong new_brk)
1084 {
1085     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1086     brk_page = HOST_PAGE_ALIGN(target_brk);
1087 }
1088 
1089 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1090 #define DEBUGF_BRK(message, args...)
1091 
1092 /* do_brk() must return target values and target errnos. */
1093 abi_long do_brk(abi_ulong new_brk)
1094 {
1095     abi_long mapped_addr;
1096     abi_ulong new_alloc_size;
1097 
1098     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1099 
1100     if (!new_brk) {
1101         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1102         return target_brk;
1103     }
1104     if (new_brk < target_original_brk) {
1105         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1106                    target_brk);
1107         return target_brk;
1108     }
1109 
1110     /* If the new brk is less than the highest page reserved to the
1111      * target heap allocation, set it and we're almost done...  */
1112     if (new_brk <= brk_page) {
1113         /* Heap contents are initialized to zero, as for anonymous
1114          * mapped pages.  */
1115         if (new_brk > target_brk) {
1116             memset(g2h(target_brk), 0, new_brk - target_brk);
1117         }
1118 	target_brk = new_brk;
1119         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1120     	return target_brk;
1121     }
1122 
1123     /* We need to allocate more memory after the brk... Note that
1124      * we don't use MAP_FIXED because that will map over the top of
1125      * any existing mapping (like the one with the host libc or qemu
1126      * itself); instead we treat "mapped but at wrong address" as
1127      * a failure and unmap again.
1128      */
1129     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1130     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1131                                         PROT_READ|PROT_WRITE,
1132                                         MAP_ANON|MAP_PRIVATE, 0, 0));
1133 
1134     if (mapped_addr == brk_page) {
1135         /* Heap contents are initialized to zero, as for anonymous
1136          * mapped pages.  Technically the new pages are already
1137          * initialized to zero since they *are* anonymous mapped
1138          * pages, however we have to take care with the contents that
1139          * come from the remaining part of the previous page: it may
1140          * contains garbage data due to a previous heap usage (grown
1141          * then shrunken).  */
1142         memset(g2h(target_brk), 0, brk_page - target_brk);
1143 
1144         target_brk = new_brk;
1145         brk_page = HOST_PAGE_ALIGN(target_brk);
1146         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1147             target_brk);
1148         return target_brk;
1149     } else if (mapped_addr != -1) {
1150         /* Mapped but at wrong address, meaning there wasn't actually
1151          * enough space for this brk.
1152          */
1153         target_munmap(mapped_addr, new_alloc_size);
1154         mapped_addr = -1;
1155         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1156     }
1157     else {
1158         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1159     }
1160 
1161 #if defined(TARGET_ALPHA)
1162     /* We (partially) emulate OSF/1 on Alpha, which requires we
1163        return a proper errno, not an unchanged brk value.  */
1164     return -TARGET_ENOMEM;
1165 #endif
1166     /* For everything else, return the previous break. */
1167     return target_brk;
1168 }
1169 
1170 static inline abi_long copy_from_user_fdset(fd_set *fds,
1171                                             abi_ulong target_fds_addr,
1172                                             int n)
1173 {
1174     int i, nw, j, k;
1175     abi_ulong b, *target_fds;
1176 
1177     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1178     if (!(target_fds = lock_user(VERIFY_READ,
1179                                  target_fds_addr,
1180                                  sizeof(abi_ulong) * nw,
1181                                  1)))
1182         return -TARGET_EFAULT;
1183 
1184     FD_ZERO(fds);
1185     k = 0;
1186     for (i = 0; i < nw; i++) {
1187         /* grab the abi_ulong */
1188         __get_user(b, &target_fds[i]);
1189         for (j = 0; j < TARGET_ABI_BITS; j++) {
1190             /* check the bit inside the abi_ulong */
1191             if ((b >> j) & 1)
1192                 FD_SET(k, fds);
1193             k++;
1194         }
1195     }
1196 
1197     unlock_user(target_fds, target_fds_addr, 0);
1198 
1199     return 0;
1200 }
1201 
1202 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1203                                                  abi_ulong target_fds_addr,
1204                                                  int n)
1205 {
1206     if (target_fds_addr) {
1207         if (copy_from_user_fdset(fds, target_fds_addr, n))
1208             return -TARGET_EFAULT;
1209         *fds_ptr = fds;
1210     } else {
1211         *fds_ptr = NULL;
1212     }
1213     return 0;
1214 }
1215 
1216 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1217                                           const fd_set *fds,
1218                                           int n)
1219 {
1220     int i, nw, j, k;
1221     abi_long v;
1222     abi_ulong *target_fds;
1223 
1224     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1225     if (!(target_fds = lock_user(VERIFY_WRITE,
1226                                  target_fds_addr,
1227                                  sizeof(abi_ulong) * nw,
1228                                  0)))
1229         return -TARGET_EFAULT;
1230 
1231     k = 0;
1232     for (i = 0; i < nw; i++) {
1233         v = 0;
1234         for (j = 0; j < TARGET_ABI_BITS; j++) {
1235             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1236             k++;
1237         }
1238         __put_user(v, &target_fds[i]);
1239     }
1240 
1241     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1242 
1243     return 0;
1244 }
1245 
1246 #if defined(__alpha__)
1247 #define HOST_HZ 1024
1248 #else
1249 #define HOST_HZ 100
1250 #endif
1251 
1252 static inline abi_long host_to_target_clock_t(long ticks)
1253 {
1254 #if HOST_HZ == TARGET_HZ
1255     return ticks;
1256 #else
1257     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1258 #endif
1259 }
1260 
1261 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1262                                              const struct rusage *rusage)
1263 {
1264     struct target_rusage *target_rusage;
1265 
1266     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1267         return -TARGET_EFAULT;
1268     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1269     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1270     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1271     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1272     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1273     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1274     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1275     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1276     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1277     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1278     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1279     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1280     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1281     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1282     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1283     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1284     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1285     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1286     unlock_user_struct(target_rusage, target_addr, 1);
1287 
1288     return 0;
1289 }
1290 
1291 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1292 {
1293     abi_ulong target_rlim_swap;
1294     rlim_t result;
1295 
1296     target_rlim_swap = tswapal(target_rlim);
1297     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1298         return RLIM_INFINITY;
1299 
1300     result = target_rlim_swap;
1301     if (target_rlim_swap != (rlim_t)result)
1302         return RLIM_INFINITY;
1303 
1304     return result;
1305 }
1306 
1307 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1308 {
1309     abi_ulong target_rlim_swap;
1310     abi_ulong result;
1311 
1312     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1313         target_rlim_swap = TARGET_RLIM_INFINITY;
1314     else
1315         target_rlim_swap = rlim;
1316     result = tswapal(target_rlim_swap);
1317 
1318     return result;
1319 }
1320 
1321 static inline int target_to_host_resource(int code)
1322 {
1323     switch (code) {
1324     case TARGET_RLIMIT_AS:
1325         return RLIMIT_AS;
1326     case TARGET_RLIMIT_CORE:
1327         return RLIMIT_CORE;
1328     case TARGET_RLIMIT_CPU:
1329         return RLIMIT_CPU;
1330     case TARGET_RLIMIT_DATA:
1331         return RLIMIT_DATA;
1332     case TARGET_RLIMIT_FSIZE:
1333         return RLIMIT_FSIZE;
1334     case TARGET_RLIMIT_LOCKS:
1335         return RLIMIT_LOCKS;
1336     case TARGET_RLIMIT_MEMLOCK:
1337         return RLIMIT_MEMLOCK;
1338     case TARGET_RLIMIT_MSGQUEUE:
1339         return RLIMIT_MSGQUEUE;
1340     case TARGET_RLIMIT_NICE:
1341         return RLIMIT_NICE;
1342     case TARGET_RLIMIT_NOFILE:
1343         return RLIMIT_NOFILE;
1344     case TARGET_RLIMIT_NPROC:
1345         return RLIMIT_NPROC;
1346     case TARGET_RLIMIT_RSS:
1347         return RLIMIT_RSS;
1348     case TARGET_RLIMIT_RTPRIO:
1349         return RLIMIT_RTPRIO;
1350     case TARGET_RLIMIT_SIGPENDING:
1351         return RLIMIT_SIGPENDING;
1352     case TARGET_RLIMIT_STACK:
1353         return RLIMIT_STACK;
1354     default:
1355         return code;
1356     }
1357 }
1358 
1359 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1360                                               abi_ulong target_tv_addr)
1361 {
1362     struct target_timeval *target_tv;
1363 
1364     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1365         return -TARGET_EFAULT;
1366 
1367     __get_user(tv->tv_sec, &target_tv->tv_sec);
1368     __get_user(tv->tv_usec, &target_tv->tv_usec);
1369 
1370     unlock_user_struct(target_tv, target_tv_addr, 0);
1371 
1372     return 0;
1373 }
1374 
1375 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1376                                             const struct timeval *tv)
1377 {
1378     struct target_timeval *target_tv;
1379 
1380     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1381         return -TARGET_EFAULT;
1382 
1383     __put_user(tv->tv_sec, &target_tv->tv_sec);
1384     __put_user(tv->tv_usec, &target_tv->tv_usec);
1385 
1386     unlock_user_struct(target_tv, target_tv_addr, 1);
1387 
1388     return 0;
1389 }
1390 
1391 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1392                                                abi_ulong target_tz_addr)
1393 {
1394     struct target_timezone *target_tz;
1395 
1396     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1397         return -TARGET_EFAULT;
1398     }
1399 
1400     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1401     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1402 
1403     unlock_user_struct(target_tz, target_tz_addr, 0);
1404 
1405     return 0;
1406 }
1407 
1408 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1409 #include <mqueue.h>
1410 
1411 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1412                                               abi_ulong target_mq_attr_addr)
1413 {
1414     struct target_mq_attr *target_mq_attr;
1415 
1416     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1417                           target_mq_attr_addr, 1))
1418         return -TARGET_EFAULT;
1419 
1420     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1421     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1422     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1423     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1424 
1425     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1426 
1427     return 0;
1428 }
1429 
1430 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1431                                             const struct mq_attr *attr)
1432 {
1433     struct target_mq_attr *target_mq_attr;
1434 
1435     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1436                           target_mq_attr_addr, 0))
1437         return -TARGET_EFAULT;
1438 
1439     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1440     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1441     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1442     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1443 
1444     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1445 
1446     return 0;
1447 }
1448 #endif
1449 
1450 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1451 /* do_select() must return target values and target errnos. */
1452 static abi_long do_select(int n,
1453                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1454                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1455 {
1456     fd_set rfds, wfds, efds;
1457     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1458     struct timeval tv;
1459     struct timespec ts, *ts_ptr;
1460     abi_long ret;
1461 
1462     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1463     if (ret) {
1464         return ret;
1465     }
1466     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1467     if (ret) {
1468         return ret;
1469     }
1470     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1471     if (ret) {
1472         return ret;
1473     }
1474 
1475     if (target_tv_addr) {
1476         if (copy_from_user_timeval(&tv, target_tv_addr))
1477             return -TARGET_EFAULT;
1478         ts.tv_sec = tv.tv_sec;
1479         ts.tv_nsec = tv.tv_usec * 1000;
1480         ts_ptr = &ts;
1481     } else {
1482         ts_ptr = NULL;
1483     }
1484 
1485     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1486                                   ts_ptr, NULL));
1487 
1488     if (!is_error(ret)) {
1489         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1490             return -TARGET_EFAULT;
1491         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1492             return -TARGET_EFAULT;
1493         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1494             return -TARGET_EFAULT;
1495 
1496         if (target_tv_addr) {
1497             tv.tv_sec = ts.tv_sec;
1498             tv.tv_usec = ts.tv_nsec / 1000;
1499             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1500                 return -TARGET_EFAULT;
1501             }
1502         }
1503     }
1504 
1505     return ret;
1506 }
1507 
1508 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1509 static abi_long do_old_select(abi_ulong arg1)
1510 {
1511     struct target_sel_arg_struct *sel;
1512     abi_ulong inp, outp, exp, tvp;
1513     long nsel;
1514 
1515     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1516         return -TARGET_EFAULT;
1517     }
1518 
1519     nsel = tswapal(sel->n);
1520     inp = tswapal(sel->inp);
1521     outp = tswapal(sel->outp);
1522     exp = tswapal(sel->exp);
1523     tvp = tswapal(sel->tvp);
1524 
1525     unlock_user_struct(sel, arg1, 0);
1526 
1527     return do_select(nsel, inp, outp, exp, tvp);
1528 }
1529 #endif
1530 #endif
1531 
1532 static abi_long do_pipe2(int host_pipe[], int flags)
1533 {
1534 #ifdef CONFIG_PIPE2
1535     return pipe2(host_pipe, flags);
1536 #else
1537     return -ENOSYS;
1538 #endif
1539 }
1540 
1541 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1542                         int flags, int is_pipe2)
1543 {
1544     int host_pipe[2];
1545     abi_long ret;
1546     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1547 
1548     if (is_error(ret))
1549         return get_errno(ret);
1550 
1551     /* Several targets have special calling conventions for the original
1552        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1553     if (!is_pipe2) {
1554 #if defined(TARGET_ALPHA)
1555         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1556         return host_pipe[0];
1557 #elif defined(TARGET_MIPS)
1558         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1559         return host_pipe[0];
1560 #elif defined(TARGET_SH4)
1561         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1562         return host_pipe[0];
1563 #elif defined(TARGET_SPARC)
1564         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1565         return host_pipe[0];
1566 #endif
1567     }
1568 
1569     if (put_user_s32(host_pipe[0], pipedes)
1570         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1571         return -TARGET_EFAULT;
1572     return get_errno(ret);
1573 }
1574 
1575 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1576                                               abi_ulong target_addr,
1577                                               socklen_t len)
1578 {
1579     struct target_ip_mreqn *target_smreqn;
1580 
1581     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1582     if (!target_smreqn)
1583         return -TARGET_EFAULT;
1584     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1585     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1586     if (len == sizeof(struct target_ip_mreqn))
1587         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1588     unlock_user(target_smreqn, target_addr, 0);
1589 
1590     return 0;
1591 }
1592 
1593 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1594                                                abi_ulong target_addr,
1595                                                socklen_t len)
1596 {
1597     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1598     sa_family_t sa_family;
1599     struct target_sockaddr *target_saddr;
1600 
1601     if (fd_trans_target_to_host_addr(fd)) {
1602         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1603     }
1604 
1605     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1606     if (!target_saddr)
1607         return -TARGET_EFAULT;
1608 
1609     sa_family = tswap16(target_saddr->sa_family);
1610 
1611     /* Oops. The caller might send a incomplete sun_path; sun_path
1612      * must be terminated by \0 (see the manual page), but
1613      * unfortunately it is quite common to specify sockaddr_un
1614      * length as "strlen(x->sun_path)" while it should be
1615      * "strlen(...) + 1". We'll fix that here if needed.
1616      * Linux kernel has a similar feature.
1617      */
1618 
1619     if (sa_family == AF_UNIX) {
1620         if (len < unix_maxlen && len > 0) {
1621             char *cp = (char*)target_saddr;
1622 
1623             if ( cp[len-1] && !cp[len] )
1624                 len++;
1625         }
1626         if (len > unix_maxlen)
1627             len = unix_maxlen;
1628     }
1629 
1630     memcpy(addr, target_saddr, len);
1631     addr->sa_family = sa_family;
1632     if (sa_family == AF_NETLINK) {
1633         struct sockaddr_nl *nladdr;
1634 
1635         nladdr = (struct sockaddr_nl *)addr;
1636         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1637         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1638     } else if (sa_family == AF_PACKET) {
1639 	struct target_sockaddr_ll *lladdr;
1640 
1641 	lladdr = (struct target_sockaddr_ll *)addr;
1642 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1643 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1644     }
1645     unlock_user(target_saddr, target_addr, 0);
1646 
1647     return 0;
1648 }
1649 
1650 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1651                                                struct sockaddr *addr,
1652                                                socklen_t len)
1653 {
1654     struct target_sockaddr *target_saddr;
1655 
1656     if (len == 0) {
1657         return 0;
1658     }
1659     assert(addr);
1660 
1661     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1662     if (!target_saddr)
1663         return -TARGET_EFAULT;
1664     memcpy(target_saddr, addr, len);
1665     if (len >= offsetof(struct target_sockaddr, sa_family) +
1666         sizeof(target_saddr->sa_family)) {
1667         target_saddr->sa_family = tswap16(addr->sa_family);
1668     }
1669     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1670         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1671         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1672         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1673     } else if (addr->sa_family == AF_PACKET) {
1674         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1675         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1676         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1677     } else if (addr->sa_family == AF_INET6 &&
1678                len >= sizeof(struct target_sockaddr_in6)) {
1679         struct target_sockaddr_in6 *target_in6 =
1680                (struct target_sockaddr_in6 *)target_saddr;
1681         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1682     }
1683     unlock_user(target_saddr, target_addr, len);
1684 
1685     return 0;
1686 }
1687 
1688 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1689                                            struct target_msghdr *target_msgh)
1690 {
1691     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1692     abi_long msg_controllen;
1693     abi_ulong target_cmsg_addr;
1694     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1695     socklen_t space = 0;
1696 
1697     msg_controllen = tswapal(target_msgh->msg_controllen);
1698     if (msg_controllen < sizeof (struct target_cmsghdr))
1699         goto the_end;
1700     target_cmsg_addr = tswapal(target_msgh->msg_control);
1701     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1702     target_cmsg_start = target_cmsg;
1703     if (!target_cmsg)
1704         return -TARGET_EFAULT;
1705 
1706     while (cmsg && target_cmsg) {
1707         void *data = CMSG_DATA(cmsg);
1708         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1709 
1710         int len = tswapal(target_cmsg->cmsg_len)
1711             - sizeof(struct target_cmsghdr);
1712 
1713         space += CMSG_SPACE(len);
1714         if (space > msgh->msg_controllen) {
1715             space -= CMSG_SPACE(len);
1716             /* This is a QEMU bug, since we allocated the payload
1717              * area ourselves (unlike overflow in host-to-target
1718              * conversion, which is just the guest giving us a buffer
1719              * that's too small). It can't happen for the payload types
1720              * we currently support; if it becomes an issue in future
1721              * we would need to improve our allocation strategy to
1722              * something more intelligent than "twice the size of the
1723              * target buffer we're reading from".
1724              */
1725             gemu_log("Host cmsg overflow\n");
1726             break;
1727         }
1728 
1729         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1730             cmsg->cmsg_level = SOL_SOCKET;
1731         } else {
1732             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1733         }
1734         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1735         cmsg->cmsg_len = CMSG_LEN(len);
1736 
1737         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1738             int *fd = (int *)data;
1739             int *target_fd = (int *)target_data;
1740             int i, numfds = len / sizeof(int);
1741 
1742             for (i = 0; i < numfds; i++) {
1743                 __get_user(fd[i], target_fd + i);
1744             }
1745         } else if (cmsg->cmsg_level == SOL_SOCKET
1746                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1747             struct ucred *cred = (struct ucred *)data;
1748             struct target_ucred *target_cred =
1749                 (struct target_ucred *)target_data;
1750 
1751             __get_user(cred->pid, &target_cred->pid);
1752             __get_user(cred->uid, &target_cred->uid);
1753             __get_user(cred->gid, &target_cred->gid);
1754         } else {
1755             gemu_log("Unsupported ancillary data: %d/%d\n",
1756                                         cmsg->cmsg_level, cmsg->cmsg_type);
1757             memcpy(data, target_data, len);
1758         }
1759 
1760         cmsg = CMSG_NXTHDR(msgh, cmsg);
1761         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1762                                          target_cmsg_start);
1763     }
1764     unlock_user(target_cmsg, target_cmsg_addr, 0);
1765  the_end:
1766     msgh->msg_controllen = space;
1767     return 0;
1768 }
1769 
1770 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1771                                            struct msghdr *msgh)
1772 {
1773     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1774     abi_long msg_controllen;
1775     abi_ulong target_cmsg_addr;
1776     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1777     socklen_t space = 0;
1778 
1779     msg_controllen = tswapal(target_msgh->msg_controllen);
1780     if (msg_controllen < sizeof (struct target_cmsghdr))
1781         goto the_end;
1782     target_cmsg_addr = tswapal(target_msgh->msg_control);
1783     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1784     target_cmsg_start = target_cmsg;
1785     if (!target_cmsg)
1786         return -TARGET_EFAULT;
1787 
1788     while (cmsg && target_cmsg) {
1789         void *data = CMSG_DATA(cmsg);
1790         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1791 
1792         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1793         int tgt_len, tgt_space;
1794 
1795         /* We never copy a half-header but may copy half-data;
1796          * this is Linux's behaviour in put_cmsg(). Note that
1797          * truncation here is a guest problem (which we report
1798          * to the guest via the CTRUNC bit), unlike truncation
1799          * in target_to_host_cmsg, which is a QEMU bug.
1800          */
1801         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1802             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1803             break;
1804         }
1805 
1806         if (cmsg->cmsg_level == SOL_SOCKET) {
1807             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1808         } else {
1809             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1810         }
1811         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1812 
1813         /* Payload types which need a different size of payload on
1814          * the target must adjust tgt_len here.
1815          */
1816         switch (cmsg->cmsg_level) {
1817         case SOL_SOCKET:
1818             switch (cmsg->cmsg_type) {
1819             case SO_TIMESTAMP:
1820                 tgt_len = sizeof(struct target_timeval);
1821                 break;
1822             default:
1823                 break;
1824             }
1825         default:
1826             tgt_len = len;
1827             break;
1828         }
1829 
1830         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1831             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1832             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1833         }
1834 
1835         /* We must now copy-and-convert len bytes of payload
1836          * into tgt_len bytes of destination space. Bear in mind
1837          * that in both source and destination we may be dealing
1838          * with a truncated value!
1839          */
1840         switch (cmsg->cmsg_level) {
1841         case SOL_SOCKET:
1842             switch (cmsg->cmsg_type) {
1843             case SCM_RIGHTS:
1844             {
1845                 int *fd = (int *)data;
1846                 int *target_fd = (int *)target_data;
1847                 int i, numfds = tgt_len / sizeof(int);
1848 
1849                 for (i = 0; i < numfds; i++) {
1850                     __put_user(fd[i], target_fd + i);
1851                 }
1852                 break;
1853             }
1854             case SO_TIMESTAMP:
1855             {
1856                 struct timeval *tv = (struct timeval *)data;
1857                 struct target_timeval *target_tv =
1858                     (struct target_timeval *)target_data;
1859 
1860                 if (len != sizeof(struct timeval) ||
1861                     tgt_len != sizeof(struct target_timeval)) {
1862                     goto unimplemented;
1863                 }
1864 
1865                 /* copy struct timeval to target */
1866                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1867                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1868                 break;
1869             }
1870             case SCM_CREDENTIALS:
1871             {
1872                 struct ucred *cred = (struct ucred *)data;
1873                 struct target_ucred *target_cred =
1874                     (struct target_ucred *)target_data;
1875 
1876                 __put_user(cred->pid, &target_cred->pid);
1877                 __put_user(cred->uid, &target_cred->uid);
1878                 __put_user(cred->gid, &target_cred->gid);
1879                 break;
1880             }
1881             default:
1882                 goto unimplemented;
1883             }
1884             break;
1885 
1886         case SOL_IP:
1887             switch (cmsg->cmsg_type) {
1888             case IP_TTL:
1889             {
1890                 uint32_t *v = (uint32_t *)data;
1891                 uint32_t *t_int = (uint32_t *)target_data;
1892 
1893                 if (len != sizeof(uint32_t) ||
1894                     tgt_len != sizeof(uint32_t)) {
1895                     goto unimplemented;
1896                 }
1897                 __put_user(*v, t_int);
1898                 break;
1899             }
1900             case IP_RECVERR:
1901             {
1902                 struct errhdr_t {
1903                    struct sock_extended_err ee;
1904                    struct sockaddr_in offender;
1905                 };
1906                 struct errhdr_t *errh = (struct errhdr_t *)data;
1907                 struct errhdr_t *target_errh =
1908                     (struct errhdr_t *)target_data;
1909 
1910                 if (len != sizeof(struct errhdr_t) ||
1911                     tgt_len != sizeof(struct errhdr_t)) {
1912                     goto unimplemented;
1913                 }
1914                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1915                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1916                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1917                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1918                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1919                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1920                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1921                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1922                     (void *) &errh->offender, sizeof(errh->offender));
1923                 break;
1924             }
1925             default:
1926                 goto unimplemented;
1927             }
1928             break;
1929 
1930         case SOL_IPV6:
1931             switch (cmsg->cmsg_type) {
1932             case IPV6_HOPLIMIT:
1933             {
1934                 uint32_t *v = (uint32_t *)data;
1935                 uint32_t *t_int = (uint32_t *)target_data;
1936 
1937                 if (len != sizeof(uint32_t) ||
1938                     tgt_len != sizeof(uint32_t)) {
1939                     goto unimplemented;
1940                 }
1941                 __put_user(*v, t_int);
1942                 break;
1943             }
1944             case IPV6_RECVERR:
1945             {
1946                 struct errhdr6_t {
1947                    struct sock_extended_err ee;
1948                    struct sockaddr_in6 offender;
1949                 };
1950                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1951                 struct errhdr6_t *target_errh =
1952                     (struct errhdr6_t *)target_data;
1953 
1954                 if (len != sizeof(struct errhdr6_t) ||
1955                     tgt_len != sizeof(struct errhdr6_t)) {
1956                     goto unimplemented;
1957                 }
1958                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1959                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1960                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1961                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1962                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1963                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1964                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1965                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1966                     (void *) &errh->offender, sizeof(errh->offender));
1967                 break;
1968             }
1969             default:
1970                 goto unimplemented;
1971             }
1972             break;
1973 
1974         default:
1975         unimplemented:
1976             gemu_log("Unsupported ancillary data: %d/%d\n",
1977                                         cmsg->cmsg_level, cmsg->cmsg_type);
1978             memcpy(target_data, data, MIN(len, tgt_len));
1979             if (tgt_len > len) {
1980                 memset(target_data + len, 0, tgt_len - len);
1981             }
1982         }
1983 
1984         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1985         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1986         if (msg_controllen < tgt_space) {
1987             tgt_space = msg_controllen;
1988         }
1989         msg_controllen -= tgt_space;
1990         space += tgt_space;
1991         cmsg = CMSG_NXTHDR(msgh, cmsg);
1992         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1993                                          target_cmsg_start);
1994     }
1995     unlock_user(target_cmsg, target_cmsg_addr, space);
1996  the_end:
1997     target_msgh->msg_controllen = tswapal(space);
1998     return 0;
1999 }
2000 
2001 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2002 {
2003     nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2004     nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2005     nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2006     nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2007     nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
2008 }
2009 
2010 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2011                                               size_t len,
2012                                               abi_long (*host_to_target_nlmsg)
2013                                                        (struct nlmsghdr *))
2014 {
2015     uint32_t nlmsg_len;
2016     abi_long ret;
2017 
2018     while (len > sizeof(struct nlmsghdr)) {
2019 
2020         nlmsg_len = nlh->nlmsg_len;
2021         if (nlmsg_len < sizeof(struct nlmsghdr) ||
2022             nlmsg_len > len) {
2023             break;
2024         }
2025 
2026         switch (nlh->nlmsg_type) {
2027         case NLMSG_DONE:
2028             tswap_nlmsghdr(nlh);
2029             return 0;
2030         case NLMSG_NOOP:
2031             break;
2032         case NLMSG_ERROR:
2033         {
2034             struct nlmsgerr *e = NLMSG_DATA(nlh);
2035             e->error = tswap32(e->error);
2036             tswap_nlmsghdr(&e->msg);
2037             tswap_nlmsghdr(nlh);
2038             return 0;
2039         }
2040         default:
2041             ret = host_to_target_nlmsg(nlh);
2042             if (ret < 0) {
2043                 tswap_nlmsghdr(nlh);
2044                 return ret;
2045             }
2046             break;
2047         }
2048         tswap_nlmsghdr(nlh);
2049         len -= NLMSG_ALIGN(nlmsg_len);
2050         nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2051     }
2052     return 0;
2053 }
2054 
2055 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2056                                               size_t len,
2057                                               abi_long (*target_to_host_nlmsg)
2058                                                        (struct nlmsghdr *))
2059 {
2060     int ret;
2061 
2062     while (len > sizeof(struct nlmsghdr)) {
2063         if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2064             tswap32(nlh->nlmsg_len) > len) {
2065             break;
2066         }
2067         tswap_nlmsghdr(nlh);
2068         switch (nlh->nlmsg_type) {
2069         case NLMSG_DONE:
2070             return 0;
2071         case NLMSG_NOOP:
2072             break;
2073         case NLMSG_ERROR:
2074         {
2075             struct nlmsgerr *e = NLMSG_DATA(nlh);
2076             e->error = tswap32(e->error);
2077             tswap_nlmsghdr(&e->msg);
2078             return 0;
2079         }
2080         default:
2081             ret = target_to_host_nlmsg(nlh);
2082             if (ret < 0) {
2083                 return ret;
2084             }
2085         }
2086         len -= NLMSG_ALIGN(nlh->nlmsg_len);
2087         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2088     }
2089     return 0;
2090 }
2091 
2092 #ifdef CONFIG_RTNETLINK
2093 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2094                                                size_t len, void *context,
2095                                                abi_long (*host_to_target_nlattr)
2096                                                         (struct nlattr *,
2097                                                          void *context))
2098 {
2099     unsigned short nla_len;
2100     abi_long ret;
2101 
2102     while (len > sizeof(struct nlattr)) {
2103         nla_len = nlattr->nla_len;
2104         if (nla_len < sizeof(struct nlattr) ||
2105             nla_len > len) {
2106             break;
2107         }
2108         ret = host_to_target_nlattr(nlattr, context);
2109         nlattr->nla_len = tswap16(nlattr->nla_len);
2110         nlattr->nla_type = tswap16(nlattr->nla_type);
2111         if (ret < 0) {
2112             return ret;
2113         }
2114         len -= NLA_ALIGN(nla_len);
2115         nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2116     }
2117     return 0;
2118 }
2119 
2120 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2121                                                size_t len,
2122                                                abi_long (*host_to_target_rtattr)
2123                                                         (struct rtattr *))
2124 {
2125     unsigned short rta_len;
2126     abi_long ret;
2127 
2128     while (len > sizeof(struct rtattr)) {
2129         rta_len = rtattr->rta_len;
2130         if (rta_len < sizeof(struct rtattr) ||
2131             rta_len > len) {
2132             break;
2133         }
2134         ret = host_to_target_rtattr(rtattr);
2135         rtattr->rta_len = tswap16(rtattr->rta_len);
2136         rtattr->rta_type = tswap16(rtattr->rta_type);
2137         if (ret < 0) {
2138             return ret;
2139         }
2140         len -= RTA_ALIGN(rta_len);
2141         rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2142     }
2143     return 0;
2144 }
2145 
2146 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2147 
2148 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2149                                                   void *context)
2150 {
2151     uint16_t *u16;
2152     uint32_t *u32;
2153     uint64_t *u64;
2154 
2155     switch (nlattr->nla_type) {
2156     /* no data */
2157     case QEMU_IFLA_BR_FDB_FLUSH:
2158         break;
2159     /* binary */
2160     case QEMU_IFLA_BR_GROUP_ADDR:
2161         break;
2162     /* uint8_t */
2163     case QEMU_IFLA_BR_VLAN_FILTERING:
2164     case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2165     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2166     case QEMU_IFLA_BR_MCAST_ROUTER:
2167     case QEMU_IFLA_BR_MCAST_SNOOPING:
2168     case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2169     case QEMU_IFLA_BR_MCAST_QUERIER:
2170     case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2171     case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2172     case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2173         break;
2174     /* uint16_t */
2175     case QEMU_IFLA_BR_PRIORITY:
2176     case QEMU_IFLA_BR_VLAN_PROTOCOL:
2177     case QEMU_IFLA_BR_GROUP_FWD_MASK:
2178     case QEMU_IFLA_BR_ROOT_PORT:
2179     case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2180         u16 = NLA_DATA(nlattr);
2181         *u16 = tswap16(*u16);
2182         break;
2183     /* uint32_t */
2184     case QEMU_IFLA_BR_FORWARD_DELAY:
2185     case QEMU_IFLA_BR_HELLO_TIME:
2186     case QEMU_IFLA_BR_MAX_AGE:
2187     case QEMU_IFLA_BR_AGEING_TIME:
2188     case QEMU_IFLA_BR_STP_STATE:
2189     case QEMU_IFLA_BR_ROOT_PATH_COST:
2190     case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2191     case QEMU_IFLA_BR_MCAST_HASH_MAX:
2192     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2193     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2194         u32 = NLA_DATA(nlattr);
2195         *u32 = tswap32(*u32);
2196         break;
2197     /* uint64_t */
2198     case QEMU_IFLA_BR_HELLO_TIMER:
2199     case QEMU_IFLA_BR_TCN_TIMER:
2200     case QEMU_IFLA_BR_GC_TIMER:
2201     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2202     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2203     case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2204     case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2205     case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2206     case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2207     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2208         u64 = NLA_DATA(nlattr);
2209         *u64 = tswap64(*u64);
2210         break;
2211     /* ifla_bridge_id: uin8_t[] */
2212     case QEMU_IFLA_BR_ROOT_ID:
2213     case QEMU_IFLA_BR_BRIDGE_ID:
2214         break;
2215     default:
2216         gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2217         break;
2218     }
2219     return 0;
2220 }
2221 
2222 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2223                                                         void *context)
2224 {
2225     uint16_t *u16;
2226     uint32_t *u32;
2227     uint64_t *u64;
2228 
2229     switch (nlattr->nla_type) {
2230     /* uint8_t */
2231     case QEMU_IFLA_BRPORT_STATE:
2232     case QEMU_IFLA_BRPORT_MODE:
2233     case QEMU_IFLA_BRPORT_GUARD:
2234     case QEMU_IFLA_BRPORT_PROTECT:
2235     case QEMU_IFLA_BRPORT_FAST_LEAVE:
2236     case QEMU_IFLA_BRPORT_LEARNING:
2237     case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2238     case QEMU_IFLA_BRPORT_PROXYARP:
2239     case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2240     case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2241     case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2242     case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2243     case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2244         break;
2245     /* uint16_t */
2246     case QEMU_IFLA_BRPORT_PRIORITY:
2247     case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2248     case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2249     case QEMU_IFLA_BRPORT_ID:
2250     case QEMU_IFLA_BRPORT_NO:
2251         u16 = NLA_DATA(nlattr);
2252         *u16 = tswap16(*u16);
2253         break;
2254     /* uin32_t */
2255     case QEMU_IFLA_BRPORT_COST:
2256         u32 = NLA_DATA(nlattr);
2257         *u32 = tswap32(*u32);
2258         break;
2259     /* uint64_t */
2260     case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2261     case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2262     case QEMU_IFLA_BRPORT_HOLD_TIMER:
2263         u64 = NLA_DATA(nlattr);
2264         *u64 = tswap64(*u64);
2265         break;
2266     /* ifla_bridge_id: uint8_t[] */
2267     case QEMU_IFLA_BRPORT_ROOT_ID:
2268     case QEMU_IFLA_BRPORT_BRIDGE_ID:
2269         break;
2270     default:
2271         gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2272         break;
2273     }
2274     return 0;
2275 }
2276 
2277 struct linkinfo_context {
2278     int len;
2279     char *name;
2280     int slave_len;
2281     char *slave_name;
2282 };
2283 
2284 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2285                                                     void *context)
2286 {
2287     struct linkinfo_context *li_context = context;
2288 
2289     switch (nlattr->nla_type) {
2290     /* string */
2291     case QEMU_IFLA_INFO_KIND:
2292         li_context->name = NLA_DATA(nlattr);
2293         li_context->len = nlattr->nla_len - NLA_HDRLEN;
2294         break;
2295     case QEMU_IFLA_INFO_SLAVE_KIND:
2296         li_context->slave_name = NLA_DATA(nlattr);
2297         li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2298         break;
2299     /* stats */
2300     case QEMU_IFLA_INFO_XSTATS:
2301         /* FIXME: only used by CAN */
2302         break;
2303     /* nested */
2304     case QEMU_IFLA_INFO_DATA:
2305         if (strncmp(li_context->name, "bridge",
2306                     li_context->len) == 0) {
2307             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2308                                                   nlattr->nla_len,
2309                                                   NULL,
2310                                              host_to_target_data_bridge_nlattr);
2311         } else {
2312             gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2313         }
2314         break;
2315     case QEMU_IFLA_INFO_SLAVE_DATA:
2316         if (strncmp(li_context->slave_name, "bridge",
2317                     li_context->slave_len) == 0) {
2318             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2319                                                   nlattr->nla_len,
2320                                                   NULL,
2321                                        host_to_target_slave_data_bridge_nlattr);
2322         } else {
2323             gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2324                      li_context->slave_name);
2325         }
2326         break;
2327     default:
2328         gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2329         break;
2330     }
2331 
2332     return 0;
2333 }
2334 
2335 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2336                                                 void *context)
2337 {
2338     uint32_t *u32;
2339     int i;
2340 
2341     switch (nlattr->nla_type) {
2342     case QEMU_IFLA_INET_CONF:
2343         u32 = NLA_DATA(nlattr);
2344         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2345              i++) {
2346             u32[i] = tswap32(u32[i]);
2347         }
2348         break;
2349     default:
2350         gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2351     }
2352     return 0;
2353 }
2354 
2355 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2356                                                 void *context)
2357 {
2358     uint32_t *u32;
2359     uint64_t *u64;
2360     struct ifla_cacheinfo *ci;
2361     int i;
2362 
2363     switch (nlattr->nla_type) {
2364     /* binaries */
2365     case QEMU_IFLA_INET6_TOKEN:
2366         break;
2367     /* uint8_t */
2368     case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2369         break;
2370     /* uint32_t */
2371     case QEMU_IFLA_INET6_FLAGS:
2372         u32 = NLA_DATA(nlattr);
2373         *u32 = tswap32(*u32);
2374         break;
2375     /* uint32_t[] */
2376     case QEMU_IFLA_INET6_CONF:
2377         u32 = NLA_DATA(nlattr);
2378         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2379              i++) {
2380             u32[i] = tswap32(u32[i]);
2381         }
2382         break;
2383     /* ifla_cacheinfo */
2384     case QEMU_IFLA_INET6_CACHEINFO:
2385         ci = NLA_DATA(nlattr);
2386         ci->max_reasm_len = tswap32(ci->max_reasm_len);
2387         ci->tstamp = tswap32(ci->tstamp);
2388         ci->reachable_time = tswap32(ci->reachable_time);
2389         ci->retrans_time = tswap32(ci->retrans_time);
2390         break;
2391     /* uint64_t[] */
2392     case QEMU_IFLA_INET6_STATS:
2393     case QEMU_IFLA_INET6_ICMP6STATS:
2394         u64 = NLA_DATA(nlattr);
2395         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2396              i++) {
2397             u64[i] = tswap64(u64[i]);
2398         }
2399         break;
2400     default:
2401         gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2402     }
2403     return 0;
2404 }
2405 
2406 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2407                                                     void *context)
2408 {
2409     switch (nlattr->nla_type) {
2410     case AF_INET:
2411         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2412                                               NULL,
2413                                              host_to_target_data_inet_nlattr);
2414     case AF_INET6:
2415         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2416                                               NULL,
2417                                              host_to_target_data_inet6_nlattr);
2418     default:
2419         gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2420         break;
2421     }
2422     return 0;
2423 }
2424 
2425 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2426 {
2427     uint32_t *u32;
2428     struct rtnl_link_stats *st;
2429     struct rtnl_link_stats64 *st64;
2430     struct rtnl_link_ifmap *map;
2431     struct linkinfo_context li_context;
2432 
2433     switch (rtattr->rta_type) {
2434     /* binary stream */
2435     case QEMU_IFLA_ADDRESS:
2436     case QEMU_IFLA_BROADCAST:
2437     /* string */
2438     case QEMU_IFLA_IFNAME:
2439     case QEMU_IFLA_QDISC:
2440         break;
2441     /* uin8_t */
2442     case QEMU_IFLA_OPERSTATE:
2443     case QEMU_IFLA_LINKMODE:
2444     case QEMU_IFLA_CARRIER:
2445     case QEMU_IFLA_PROTO_DOWN:
2446         break;
2447     /* uint32_t */
2448     case QEMU_IFLA_MTU:
2449     case QEMU_IFLA_LINK:
2450     case QEMU_IFLA_WEIGHT:
2451     case QEMU_IFLA_TXQLEN:
2452     case QEMU_IFLA_CARRIER_CHANGES:
2453     case QEMU_IFLA_NUM_RX_QUEUES:
2454     case QEMU_IFLA_NUM_TX_QUEUES:
2455     case QEMU_IFLA_PROMISCUITY:
2456     case QEMU_IFLA_EXT_MASK:
2457     case QEMU_IFLA_LINK_NETNSID:
2458     case QEMU_IFLA_GROUP:
2459     case QEMU_IFLA_MASTER:
2460     case QEMU_IFLA_NUM_VF:
2461     case QEMU_IFLA_GSO_MAX_SEGS:
2462     case QEMU_IFLA_GSO_MAX_SIZE:
2463         u32 = RTA_DATA(rtattr);
2464         *u32 = tswap32(*u32);
2465         break;
2466     /* struct rtnl_link_stats */
2467     case QEMU_IFLA_STATS:
2468         st = RTA_DATA(rtattr);
2469         st->rx_packets = tswap32(st->rx_packets);
2470         st->tx_packets = tswap32(st->tx_packets);
2471         st->rx_bytes = tswap32(st->rx_bytes);
2472         st->tx_bytes = tswap32(st->tx_bytes);
2473         st->rx_errors = tswap32(st->rx_errors);
2474         st->tx_errors = tswap32(st->tx_errors);
2475         st->rx_dropped = tswap32(st->rx_dropped);
2476         st->tx_dropped = tswap32(st->tx_dropped);
2477         st->multicast = tswap32(st->multicast);
2478         st->collisions = tswap32(st->collisions);
2479 
2480         /* detailed rx_errors: */
2481         st->rx_length_errors = tswap32(st->rx_length_errors);
2482         st->rx_over_errors = tswap32(st->rx_over_errors);
2483         st->rx_crc_errors = tswap32(st->rx_crc_errors);
2484         st->rx_frame_errors = tswap32(st->rx_frame_errors);
2485         st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2486         st->rx_missed_errors = tswap32(st->rx_missed_errors);
2487 
2488         /* detailed tx_errors */
2489         st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2490         st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2491         st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2492         st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2493         st->tx_window_errors = tswap32(st->tx_window_errors);
2494 
2495         /* for cslip etc */
2496         st->rx_compressed = tswap32(st->rx_compressed);
2497         st->tx_compressed = tswap32(st->tx_compressed);
2498         break;
2499     /* struct rtnl_link_stats64 */
2500     case QEMU_IFLA_STATS64:
2501         st64 = RTA_DATA(rtattr);
2502         st64->rx_packets = tswap64(st64->rx_packets);
2503         st64->tx_packets = tswap64(st64->tx_packets);
2504         st64->rx_bytes = tswap64(st64->rx_bytes);
2505         st64->tx_bytes = tswap64(st64->tx_bytes);
2506         st64->rx_errors = tswap64(st64->rx_errors);
2507         st64->tx_errors = tswap64(st64->tx_errors);
2508         st64->rx_dropped = tswap64(st64->rx_dropped);
2509         st64->tx_dropped = tswap64(st64->tx_dropped);
2510         st64->multicast = tswap64(st64->multicast);
2511         st64->collisions = tswap64(st64->collisions);
2512 
2513         /* detailed rx_errors: */
2514         st64->rx_length_errors = tswap64(st64->rx_length_errors);
2515         st64->rx_over_errors = tswap64(st64->rx_over_errors);
2516         st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2517         st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2518         st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2519         st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2520 
2521         /* detailed tx_errors */
2522         st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2523         st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2524         st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2525         st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2526         st64->tx_window_errors = tswap64(st64->tx_window_errors);
2527 
2528         /* for cslip etc */
2529         st64->rx_compressed = tswap64(st64->rx_compressed);
2530         st64->tx_compressed = tswap64(st64->tx_compressed);
2531         break;
2532     /* struct rtnl_link_ifmap */
2533     case QEMU_IFLA_MAP:
2534         map = RTA_DATA(rtattr);
2535         map->mem_start = tswap64(map->mem_start);
2536         map->mem_end = tswap64(map->mem_end);
2537         map->base_addr = tswap64(map->base_addr);
2538         map->irq = tswap16(map->irq);
2539         break;
2540     /* nested */
2541     case QEMU_IFLA_LINKINFO:
2542         memset(&li_context, 0, sizeof(li_context));
2543         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2544                                               &li_context,
2545                                            host_to_target_data_linkinfo_nlattr);
2546     case QEMU_IFLA_AF_SPEC:
2547         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2548                                               NULL,
2549                                              host_to_target_data_spec_nlattr);
2550     default:
2551         gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2552         break;
2553     }
2554     return 0;
2555 }
2556 
2557 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2558 {
2559     uint32_t *u32;
2560     struct ifa_cacheinfo *ci;
2561 
2562     switch (rtattr->rta_type) {
2563     /* binary: depends on family type */
2564     case IFA_ADDRESS:
2565     case IFA_LOCAL:
2566         break;
2567     /* string */
2568     case IFA_LABEL:
2569         break;
2570     /* u32 */
2571     case IFA_FLAGS:
2572     case IFA_BROADCAST:
2573         u32 = RTA_DATA(rtattr);
2574         *u32 = tswap32(*u32);
2575         break;
2576     /* struct ifa_cacheinfo */
2577     case IFA_CACHEINFO:
2578         ci = RTA_DATA(rtattr);
2579         ci->ifa_prefered = tswap32(ci->ifa_prefered);
2580         ci->ifa_valid = tswap32(ci->ifa_valid);
2581         ci->cstamp = tswap32(ci->cstamp);
2582         ci->tstamp = tswap32(ci->tstamp);
2583         break;
2584     default:
2585         gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2586         break;
2587     }
2588     return 0;
2589 }
2590 
2591 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2592 {
2593     uint32_t *u32;
2594     switch (rtattr->rta_type) {
2595     /* binary: depends on family type */
2596     case RTA_GATEWAY:
2597     case RTA_DST:
2598     case RTA_PREFSRC:
2599         break;
2600     /* u32 */
2601     case RTA_PRIORITY:
2602     case RTA_TABLE:
2603     case RTA_OIF:
2604         u32 = RTA_DATA(rtattr);
2605         *u32 = tswap32(*u32);
2606         break;
2607     default:
2608         gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2609         break;
2610     }
2611     return 0;
2612 }
2613 
2614 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2615                                          uint32_t rtattr_len)
2616 {
2617     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2618                                           host_to_target_data_link_rtattr);
2619 }
2620 
2621 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2622                                          uint32_t rtattr_len)
2623 {
2624     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2625                                           host_to_target_data_addr_rtattr);
2626 }
2627 
2628 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2629                                          uint32_t rtattr_len)
2630 {
2631     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2632                                           host_to_target_data_route_rtattr);
2633 }
2634 
2635 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2636 {
2637     uint32_t nlmsg_len;
2638     struct ifinfomsg *ifi;
2639     struct ifaddrmsg *ifa;
2640     struct rtmsg *rtm;
2641 
2642     nlmsg_len = nlh->nlmsg_len;
2643     switch (nlh->nlmsg_type) {
2644     case RTM_NEWLINK:
2645     case RTM_DELLINK:
2646     case RTM_GETLINK:
2647         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2648             ifi = NLMSG_DATA(nlh);
2649             ifi->ifi_type = tswap16(ifi->ifi_type);
2650             ifi->ifi_index = tswap32(ifi->ifi_index);
2651             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2652             ifi->ifi_change = tswap32(ifi->ifi_change);
2653             host_to_target_link_rtattr(IFLA_RTA(ifi),
2654                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2655         }
2656         break;
2657     case RTM_NEWADDR:
2658     case RTM_DELADDR:
2659     case RTM_GETADDR:
2660         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2661             ifa = NLMSG_DATA(nlh);
2662             ifa->ifa_index = tswap32(ifa->ifa_index);
2663             host_to_target_addr_rtattr(IFA_RTA(ifa),
2664                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2665         }
2666         break;
2667     case RTM_NEWROUTE:
2668     case RTM_DELROUTE:
2669     case RTM_GETROUTE:
2670         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2671             rtm = NLMSG_DATA(nlh);
2672             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2673             host_to_target_route_rtattr(RTM_RTA(rtm),
2674                                         nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2675         }
2676         break;
2677     default:
2678         return -TARGET_EINVAL;
2679     }
2680     return 0;
2681 }
2682 
2683 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2684                                                   size_t len)
2685 {
2686     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2687 }
2688 
2689 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2690                                                size_t len,
2691                                                abi_long (*target_to_host_rtattr)
2692                                                         (struct rtattr *))
2693 {
2694     abi_long ret;
2695 
2696     while (len >= sizeof(struct rtattr)) {
2697         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2698             tswap16(rtattr->rta_len) > len) {
2699             break;
2700         }
2701         rtattr->rta_len = tswap16(rtattr->rta_len);
2702         rtattr->rta_type = tswap16(rtattr->rta_type);
2703         ret = target_to_host_rtattr(rtattr);
2704         if (ret < 0) {
2705             return ret;
2706         }
2707         len -= RTA_ALIGN(rtattr->rta_len);
2708         rtattr = (struct rtattr *)(((char *)rtattr) +
2709                  RTA_ALIGN(rtattr->rta_len));
2710     }
2711     return 0;
2712 }
2713 
2714 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2715 {
2716     switch (rtattr->rta_type) {
2717     default:
2718         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2719         break;
2720     }
2721     return 0;
2722 }
2723 
2724 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2725 {
2726     switch (rtattr->rta_type) {
2727     /* binary: depends on family type */
2728     case IFA_LOCAL:
2729     case IFA_ADDRESS:
2730         break;
2731     default:
2732         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2733         break;
2734     }
2735     return 0;
2736 }
2737 
2738 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2739 {
2740     uint32_t *u32;
2741     switch (rtattr->rta_type) {
2742     /* binary: depends on family type */
2743     case RTA_DST:
2744     case RTA_SRC:
2745     case RTA_GATEWAY:
2746         break;
2747     /* u32 */
2748     case RTA_PRIORITY:
2749     case RTA_OIF:
2750         u32 = RTA_DATA(rtattr);
2751         *u32 = tswap32(*u32);
2752         break;
2753     default:
2754         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2755         break;
2756     }
2757     return 0;
2758 }
2759 
2760 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2761                                        uint32_t rtattr_len)
2762 {
2763     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2764                                    target_to_host_data_link_rtattr);
2765 }
2766 
2767 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2768                                      uint32_t rtattr_len)
2769 {
2770     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2771                                    target_to_host_data_addr_rtattr);
2772 }
2773 
2774 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2775                                      uint32_t rtattr_len)
2776 {
2777     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2778                                    target_to_host_data_route_rtattr);
2779 }
2780 
2781 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2782 {
2783     struct ifinfomsg *ifi;
2784     struct ifaddrmsg *ifa;
2785     struct rtmsg *rtm;
2786 
2787     switch (nlh->nlmsg_type) {
2788     case RTM_GETLINK:
2789         break;
2790     case RTM_NEWLINK:
2791     case RTM_DELLINK:
2792         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2793             ifi = NLMSG_DATA(nlh);
2794             ifi->ifi_type = tswap16(ifi->ifi_type);
2795             ifi->ifi_index = tswap32(ifi->ifi_index);
2796             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2797             ifi->ifi_change = tswap32(ifi->ifi_change);
2798             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2799                                        NLMSG_LENGTH(sizeof(*ifi)));
2800         }
2801         break;
2802     case RTM_GETADDR:
2803     case RTM_NEWADDR:
2804     case RTM_DELADDR:
2805         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2806             ifa = NLMSG_DATA(nlh);
2807             ifa->ifa_index = tswap32(ifa->ifa_index);
2808             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2809                                        NLMSG_LENGTH(sizeof(*ifa)));
2810         }
2811         break;
2812     case RTM_GETROUTE:
2813         break;
2814     case RTM_NEWROUTE:
2815     case RTM_DELROUTE:
2816         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2817             rtm = NLMSG_DATA(nlh);
2818             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2819             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2820                                         NLMSG_LENGTH(sizeof(*rtm)));
2821         }
2822         break;
2823     default:
2824         return -TARGET_EOPNOTSUPP;
2825     }
2826     return 0;
2827 }
2828 
2829 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2830 {
2831     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2832 }
2833 #endif /* CONFIG_RTNETLINK */
2834 
2835 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2836 {
2837     switch (nlh->nlmsg_type) {
2838     default:
2839         gemu_log("Unknown host audit message type %d\n",
2840                  nlh->nlmsg_type);
2841         return -TARGET_EINVAL;
2842     }
2843     return 0;
2844 }
2845 
2846 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2847                                                   size_t len)
2848 {
2849     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2850 }
2851 
2852 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2853 {
2854     switch (nlh->nlmsg_type) {
2855     case AUDIT_USER:
2856     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2857     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2858         break;
2859     default:
2860         gemu_log("Unknown target audit message type %d\n",
2861                  nlh->nlmsg_type);
2862         return -TARGET_EINVAL;
2863     }
2864 
2865     return 0;
2866 }
2867 
2868 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2869 {
2870     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2871 }
2872 
2873 /* do_setsockopt() Must return target values and target errnos. */
2874 static abi_long do_setsockopt(int sockfd, int level, int optname,
2875                               abi_ulong optval_addr, socklen_t optlen)
2876 {
2877     abi_long ret;
2878     int val;
2879     struct ip_mreqn *ip_mreq;
2880     struct ip_mreq_source *ip_mreq_source;
2881 
2882     switch(level) {
2883     case SOL_TCP:
2884         /* TCP options all take an 'int' value.  */
2885         if (optlen < sizeof(uint32_t))
2886             return -TARGET_EINVAL;
2887 
2888         if (get_user_u32(val, optval_addr))
2889             return -TARGET_EFAULT;
2890         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2891         break;
2892     case SOL_IP:
2893         switch(optname) {
2894         case IP_TOS:
2895         case IP_TTL:
2896         case IP_HDRINCL:
2897         case IP_ROUTER_ALERT:
2898         case IP_RECVOPTS:
2899         case IP_RETOPTS:
2900         case IP_PKTINFO:
2901         case IP_MTU_DISCOVER:
2902         case IP_RECVERR:
2903         case IP_RECVTTL:
2904         case IP_RECVTOS:
2905 #ifdef IP_FREEBIND
2906         case IP_FREEBIND:
2907 #endif
2908         case IP_MULTICAST_TTL:
2909         case IP_MULTICAST_LOOP:
2910             val = 0;
2911             if (optlen >= sizeof(uint32_t)) {
2912                 if (get_user_u32(val, optval_addr))
2913                     return -TARGET_EFAULT;
2914             } else if (optlen >= 1) {
2915                 if (get_user_u8(val, optval_addr))
2916                     return -TARGET_EFAULT;
2917             }
2918             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2919             break;
2920         case IP_ADD_MEMBERSHIP:
2921         case IP_DROP_MEMBERSHIP:
2922             if (optlen < sizeof (struct target_ip_mreq) ||
2923                 optlen > sizeof (struct target_ip_mreqn))
2924                 return -TARGET_EINVAL;
2925 
2926             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2927             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2928             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2929             break;
2930 
2931         case IP_BLOCK_SOURCE:
2932         case IP_UNBLOCK_SOURCE:
2933         case IP_ADD_SOURCE_MEMBERSHIP:
2934         case IP_DROP_SOURCE_MEMBERSHIP:
2935             if (optlen != sizeof (struct target_ip_mreq_source))
2936                 return -TARGET_EINVAL;
2937 
2938             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2939             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2940             unlock_user (ip_mreq_source, optval_addr, 0);
2941             break;
2942 
2943         default:
2944             goto unimplemented;
2945         }
2946         break;
2947     case SOL_IPV6:
2948         switch (optname) {
2949         case IPV6_MTU_DISCOVER:
2950         case IPV6_MTU:
2951         case IPV6_V6ONLY:
2952         case IPV6_RECVPKTINFO:
2953         case IPV6_UNICAST_HOPS:
2954         case IPV6_RECVERR:
2955         case IPV6_RECVHOPLIMIT:
2956         case IPV6_2292HOPLIMIT:
2957         case IPV6_CHECKSUM:
2958             val = 0;
2959             if (optlen < sizeof(uint32_t)) {
2960                 return -TARGET_EINVAL;
2961             }
2962             if (get_user_u32(val, optval_addr)) {
2963                 return -TARGET_EFAULT;
2964             }
2965             ret = get_errno(setsockopt(sockfd, level, optname,
2966                                        &val, sizeof(val)));
2967             break;
2968         case IPV6_PKTINFO:
2969         {
2970             struct in6_pktinfo pki;
2971 
2972             if (optlen < sizeof(pki)) {
2973                 return -TARGET_EINVAL;
2974             }
2975 
2976             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2977                 return -TARGET_EFAULT;
2978             }
2979 
2980             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2981 
2982             ret = get_errno(setsockopt(sockfd, level, optname,
2983                                        &pki, sizeof(pki)));
2984             break;
2985         }
2986         default:
2987             goto unimplemented;
2988         }
2989         break;
2990     case SOL_ICMPV6:
2991         switch (optname) {
2992         case ICMPV6_FILTER:
2993         {
2994             struct icmp6_filter icmp6f;
2995 
2996             if (optlen > sizeof(icmp6f)) {
2997                 optlen = sizeof(icmp6f);
2998             }
2999 
3000             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3001                 return -TARGET_EFAULT;
3002             }
3003 
3004             for (val = 0; val < 8; val++) {
3005                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3006             }
3007 
3008             ret = get_errno(setsockopt(sockfd, level, optname,
3009                                        &icmp6f, optlen));
3010             break;
3011         }
3012         default:
3013             goto unimplemented;
3014         }
3015         break;
3016     case SOL_RAW:
3017         switch (optname) {
3018         case ICMP_FILTER:
3019         case IPV6_CHECKSUM:
3020             /* those take an u32 value */
3021             if (optlen < sizeof(uint32_t)) {
3022                 return -TARGET_EINVAL;
3023             }
3024 
3025             if (get_user_u32(val, optval_addr)) {
3026                 return -TARGET_EFAULT;
3027             }
3028             ret = get_errno(setsockopt(sockfd, level, optname,
3029                                        &val, sizeof(val)));
3030             break;
3031 
3032         default:
3033             goto unimplemented;
3034         }
3035         break;
3036     case TARGET_SOL_SOCKET:
3037         switch (optname) {
3038         case TARGET_SO_RCVTIMEO:
3039         {
3040                 struct timeval tv;
3041 
3042                 optname = SO_RCVTIMEO;
3043 
3044 set_timeout:
3045                 if (optlen != sizeof(struct target_timeval)) {
3046                     return -TARGET_EINVAL;
3047                 }
3048 
3049                 if (copy_from_user_timeval(&tv, optval_addr)) {
3050                     return -TARGET_EFAULT;
3051                 }
3052 
3053                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3054                                 &tv, sizeof(tv)));
3055                 return ret;
3056         }
3057         case TARGET_SO_SNDTIMEO:
3058                 optname = SO_SNDTIMEO;
3059                 goto set_timeout;
3060         case TARGET_SO_ATTACH_FILTER:
3061         {
3062                 struct target_sock_fprog *tfprog;
3063                 struct target_sock_filter *tfilter;
3064                 struct sock_fprog fprog;
3065                 struct sock_filter *filter;
3066                 int i;
3067 
3068                 if (optlen != sizeof(*tfprog)) {
3069                     return -TARGET_EINVAL;
3070                 }
3071                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3072                     return -TARGET_EFAULT;
3073                 }
3074                 if (!lock_user_struct(VERIFY_READ, tfilter,
3075                                       tswapal(tfprog->filter), 0)) {
3076                     unlock_user_struct(tfprog, optval_addr, 1);
3077                     return -TARGET_EFAULT;
3078                 }
3079 
3080                 fprog.len = tswap16(tfprog->len);
3081                 filter = g_try_new(struct sock_filter, fprog.len);
3082                 if (filter == NULL) {
3083                     unlock_user_struct(tfilter, tfprog->filter, 1);
3084                     unlock_user_struct(tfprog, optval_addr, 1);
3085                     return -TARGET_ENOMEM;
3086                 }
3087                 for (i = 0; i < fprog.len; i++) {
3088                     filter[i].code = tswap16(tfilter[i].code);
3089                     filter[i].jt = tfilter[i].jt;
3090                     filter[i].jf = tfilter[i].jf;
3091                     filter[i].k = tswap32(tfilter[i].k);
3092                 }
3093                 fprog.filter = filter;
3094 
3095                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3096                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3097                 g_free(filter);
3098 
3099                 unlock_user_struct(tfilter, tfprog->filter, 1);
3100                 unlock_user_struct(tfprog, optval_addr, 1);
3101                 return ret;
3102         }
3103 	case TARGET_SO_BINDTODEVICE:
3104 	{
3105 		char *dev_ifname, *addr_ifname;
3106 
3107 		if (optlen > IFNAMSIZ - 1) {
3108 		    optlen = IFNAMSIZ - 1;
3109 		}
3110 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3111 		if (!dev_ifname) {
3112 		    return -TARGET_EFAULT;
3113 		}
3114 		optname = SO_BINDTODEVICE;
3115 		addr_ifname = alloca(IFNAMSIZ);
3116 		memcpy(addr_ifname, dev_ifname, optlen);
3117 		addr_ifname[optlen] = 0;
3118 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3119                                            addr_ifname, optlen));
3120 		unlock_user (dev_ifname, optval_addr, 0);
3121 		return ret;
3122 	}
3123             /* Options with 'int' argument.  */
3124         case TARGET_SO_DEBUG:
3125 		optname = SO_DEBUG;
3126 		break;
3127         case TARGET_SO_REUSEADDR:
3128 		optname = SO_REUSEADDR;
3129 		break;
3130         case TARGET_SO_TYPE:
3131 		optname = SO_TYPE;
3132 		break;
3133         case TARGET_SO_ERROR:
3134 		optname = SO_ERROR;
3135 		break;
3136         case TARGET_SO_DONTROUTE:
3137 		optname = SO_DONTROUTE;
3138 		break;
3139         case TARGET_SO_BROADCAST:
3140 		optname = SO_BROADCAST;
3141 		break;
3142         case TARGET_SO_SNDBUF:
3143 		optname = SO_SNDBUF;
3144 		break;
3145         case TARGET_SO_SNDBUFFORCE:
3146                 optname = SO_SNDBUFFORCE;
3147                 break;
3148         case TARGET_SO_RCVBUF:
3149 		optname = SO_RCVBUF;
3150 		break;
3151         case TARGET_SO_RCVBUFFORCE:
3152                 optname = SO_RCVBUFFORCE;
3153                 break;
3154         case TARGET_SO_KEEPALIVE:
3155 		optname = SO_KEEPALIVE;
3156 		break;
3157         case TARGET_SO_OOBINLINE:
3158 		optname = SO_OOBINLINE;
3159 		break;
3160         case TARGET_SO_NO_CHECK:
3161 		optname = SO_NO_CHECK;
3162 		break;
3163         case TARGET_SO_PRIORITY:
3164 		optname = SO_PRIORITY;
3165 		break;
3166 #ifdef SO_BSDCOMPAT
3167         case TARGET_SO_BSDCOMPAT:
3168 		optname = SO_BSDCOMPAT;
3169 		break;
3170 #endif
3171         case TARGET_SO_PASSCRED:
3172 		optname = SO_PASSCRED;
3173 		break;
3174         case TARGET_SO_PASSSEC:
3175                 optname = SO_PASSSEC;
3176                 break;
3177         case TARGET_SO_TIMESTAMP:
3178 		optname = SO_TIMESTAMP;
3179 		break;
3180         case TARGET_SO_RCVLOWAT:
3181 		optname = SO_RCVLOWAT;
3182 		break;
3183         default:
3184             goto unimplemented;
3185         }
3186 	if (optlen < sizeof(uint32_t))
3187             return -TARGET_EINVAL;
3188 
3189 	if (get_user_u32(val, optval_addr))
3190             return -TARGET_EFAULT;
3191 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3192         break;
3193     default:
3194     unimplemented:
3195         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3196         ret = -TARGET_ENOPROTOOPT;
3197     }
3198     return ret;
3199 }
3200 
3201 /* do_getsockopt() Must return target values and target errnos. */
3202 static abi_long do_getsockopt(int sockfd, int level, int optname,
3203                               abi_ulong optval_addr, abi_ulong optlen)
3204 {
3205     abi_long ret;
3206     int len, val;
3207     socklen_t lv;
3208 
3209     switch(level) {
3210     case TARGET_SOL_SOCKET:
3211         level = SOL_SOCKET;
3212         switch (optname) {
3213         /* These don't just return a single integer */
3214         case TARGET_SO_LINGER:
3215         case TARGET_SO_RCVTIMEO:
3216         case TARGET_SO_SNDTIMEO:
3217         case TARGET_SO_PEERNAME:
3218             goto unimplemented;
3219         case TARGET_SO_PEERCRED: {
3220             struct ucred cr;
3221             socklen_t crlen;
3222             struct target_ucred *tcr;
3223 
3224             if (get_user_u32(len, optlen)) {
3225                 return -TARGET_EFAULT;
3226             }
3227             if (len < 0) {
3228                 return -TARGET_EINVAL;
3229             }
3230 
3231             crlen = sizeof(cr);
3232             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3233                                        &cr, &crlen));
3234             if (ret < 0) {
3235                 return ret;
3236             }
3237             if (len > crlen) {
3238                 len = crlen;
3239             }
3240             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3241                 return -TARGET_EFAULT;
3242             }
3243             __put_user(cr.pid, &tcr->pid);
3244             __put_user(cr.uid, &tcr->uid);
3245             __put_user(cr.gid, &tcr->gid);
3246             unlock_user_struct(tcr, optval_addr, 1);
3247             if (put_user_u32(len, optlen)) {
3248                 return -TARGET_EFAULT;
3249             }
3250             break;
3251         }
3252         /* Options with 'int' argument.  */
3253         case TARGET_SO_DEBUG:
3254             optname = SO_DEBUG;
3255             goto int_case;
3256         case TARGET_SO_REUSEADDR:
3257             optname = SO_REUSEADDR;
3258             goto int_case;
3259         case TARGET_SO_TYPE:
3260             optname = SO_TYPE;
3261             goto int_case;
3262         case TARGET_SO_ERROR:
3263             optname = SO_ERROR;
3264             goto int_case;
3265         case TARGET_SO_DONTROUTE:
3266             optname = SO_DONTROUTE;
3267             goto int_case;
3268         case TARGET_SO_BROADCAST:
3269             optname = SO_BROADCAST;
3270             goto int_case;
3271         case TARGET_SO_SNDBUF:
3272             optname = SO_SNDBUF;
3273             goto int_case;
3274         case TARGET_SO_RCVBUF:
3275             optname = SO_RCVBUF;
3276             goto int_case;
3277         case TARGET_SO_KEEPALIVE:
3278             optname = SO_KEEPALIVE;
3279             goto int_case;
3280         case TARGET_SO_OOBINLINE:
3281             optname = SO_OOBINLINE;
3282             goto int_case;
3283         case TARGET_SO_NO_CHECK:
3284             optname = SO_NO_CHECK;
3285             goto int_case;
3286         case TARGET_SO_PRIORITY:
3287             optname = SO_PRIORITY;
3288             goto int_case;
3289 #ifdef SO_BSDCOMPAT
3290         case TARGET_SO_BSDCOMPAT:
3291             optname = SO_BSDCOMPAT;
3292             goto int_case;
3293 #endif
3294         case TARGET_SO_PASSCRED:
3295             optname = SO_PASSCRED;
3296             goto int_case;
3297         case TARGET_SO_TIMESTAMP:
3298             optname = SO_TIMESTAMP;
3299             goto int_case;
3300         case TARGET_SO_RCVLOWAT:
3301             optname = SO_RCVLOWAT;
3302             goto int_case;
3303         case TARGET_SO_ACCEPTCONN:
3304             optname = SO_ACCEPTCONN;
3305             goto int_case;
3306         default:
3307             goto int_case;
3308         }
3309         break;
3310     case SOL_TCP:
3311         /* TCP options all take an 'int' value.  */
3312     int_case:
3313         if (get_user_u32(len, optlen))
3314             return -TARGET_EFAULT;
3315         if (len < 0)
3316             return -TARGET_EINVAL;
3317         lv = sizeof(lv);
3318         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3319         if (ret < 0)
3320             return ret;
3321         if (optname == SO_TYPE) {
3322             val = host_to_target_sock_type(val);
3323         }
3324         if (len > lv)
3325             len = lv;
3326         if (len == 4) {
3327             if (put_user_u32(val, optval_addr))
3328                 return -TARGET_EFAULT;
3329         } else {
3330             if (put_user_u8(val, optval_addr))
3331                 return -TARGET_EFAULT;
3332         }
3333         if (put_user_u32(len, optlen))
3334             return -TARGET_EFAULT;
3335         break;
3336     case SOL_IP:
3337         switch(optname) {
3338         case IP_TOS:
3339         case IP_TTL:
3340         case IP_HDRINCL:
3341         case IP_ROUTER_ALERT:
3342         case IP_RECVOPTS:
3343         case IP_RETOPTS:
3344         case IP_PKTINFO:
3345         case IP_MTU_DISCOVER:
3346         case IP_RECVERR:
3347         case IP_RECVTOS:
3348 #ifdef IP_FREEBIND
3349         case IP_FREEBIND:
3350 #endif
3351         case IP_MULTICAST_TTL:
3352         case IP_MULTICAST_LOOP:
3353             if (get_user_u32(len, optlen))
3354                 return -TARGET_EFAULT;
3355             if (len < 0)
3356                 return -TARGET_EINVAL;
3357             lv = sizeof(lv);
3358             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3359             if (ret < 0)
3360                 return ret;
3361             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3362                 len = 1;
3363                 if (put_user_u32(len, optlen)
3364                     || put_user_u8(val, optval_addr))
3365                     return -TARGET_EFAULT;
3366             } else {
3367                 if (len > sizeof(int))
3368                     len = sizeof(int);
3369                 if (put_user_u32(len, optlen)
3370                     || put_user_u32(val, optval_addr))
3371                     return -TARGET_EFAULT;
3372             }
3373             break;
3374         default:
3375             ret = -TARGET_ENOPROTOOPT;
3376             break;
3377         }
3378         break;
3379     default:
3380     unimplemented:
3381         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3382                  level, optname);
3383         ret = -TARGET_EOPNOTSUPP;
3384         break;
3385     }
3386     return ret;
3387 }
3388 
3389 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3390                                 abi_ulong count, int copy)
3391 {
3392     struct target_iovec *target_vec;
3393     struct iovec *vec;
3394     abi_ulong total_len, max_len;
3395     int i;
3396     int err = 0;
3397     bool bad_address = false;
3398 
3399     if (count == 0) {
3400         errno = 0;
3401         return NULL;
3402     }
3403     if (count > IOV_MAX) {
3404         errno = EINVAL;
3405         return NULL;
3406     }
3407 
3408     vec = g_try_new0(struct iovec, count);
3409     if (vec == NULL) {
3410         errno = ENOMEM;
3411         return NULL;
3412     }
3413 
3414     target_vec = lock_user(VERIFY_READ, target_addr,
3415                            count * sizeof(struct target_iovec), 1);
3416     if (target_vec == NULL) {
3417         err = EFAULT;
3418         goto fail2;
3419     }
3420 
3421     /* ??? If host page size > target page size, this will result in a
3422        value larger than what we can actually support.  */
3423     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3424     total_len = 0;
3425 
3426     for (i = 0; i < count; i++) {
3427         abi_ulong base = tswapal(target_vec[i].iov_base);
3428         abi_long len = tswapal(target_vec[i].iov_len);
3429 
3430         if (len < 0) {
3431             err = EINVAL;
3432             goto fail;
3433         } else if (len == 0) {
3434             /* Zero length pointer is ignored.  */
3435             vec[i].iov_base = 0;
3436         } else {
3437             vec[i].iov_base = lock_user(type, base, len, copy);
3438             /* If the first buffer pointer is bad, this is a fault.  But
3439              * subsequent bad buffers will result in a partial write; this
3440              * is realized by filling the vector with null pointers and
3441              * zero lengths. */
3442             if (!vec[i].iov_base) {
3443                 if (i == 0) {
3444                     err = EFAULT;
3445                     goto fail;
3446                 } else {
3447                     bad_address = true;
3448                 }
3449             }
3450             if (bad_address) {
3451                 len = 0;
3452             }
3453             if (len > max_len - total_len) {
3454                 len = max_len - total_len;
3455             }
3456         }
3457         vec[i].iov_len = len;
3458         total_len += len;
3459     }
3460 
3461     unlock_user(target_vec, target_addr, 0);
3462     return vec;
3463 
3464  fail:
3465     while (--i >= 0) {
3466         if (tswapal(target_vec[i].iov_len) > 0) {
3467             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3468         }
3469     }
3470     unlock_user(target_vec, target_addr, 0);
3471  fail2:
3472     g_free(vec);
3473     errno = err;
3474     return NULL;
3475 }
3476 
3477 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3478                          abi_ulong count, int copy)
3479 {
3480     struct target_iovec *target_vec;
3481     int i;
3482 
3483     target_vec = lock_user(VERIFY_READ, target_addr,
3484                            count * sizeof(struct target_iovec), 1);
3485     if (target_vec) {
3486         for (i = 0; i < count; i++) {
3487             abi_ulong base = tswapal(target_vec[i].iov_base);
3488             abi_long len = tswapal(target_vec[i].iov_len);
3489             if (len < 0) {
3490                 break;
3491             }
3492             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3493         }
3494         unlock_user(target_vec, target_addr, 0);
3495     }
3496 
3497     g_free(vec);
3498 }
3499 
3500 static inline int target_to_host_sock_type(int *type)
3501 {
3502     int host_type = 0;
3503     int target_type = *type;
3504 
3505     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3506     case TARGET_SOCK_DGRAM:
3507         host_type = SOCK_DGRAM;
3508         break;
3509     case TARGET_SOCK_STREAM:
3510         host_type = SOCK_STREAM;
3511         break;
3512     default:
3513         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3514         break;
3515     }
3516     if (target_type & TARGET_SOCK_CLOEXEC) {
3517 #if defined(SOCK_CLOEXEC)
3518         host_type |= SOCK_CLOEXEC;
3519 #else
3520         return -TARGET_EINVAL;
3521 #endif
3522     }
3523     if (target_type & TARGET_SOCK_NONBLOCK) {
3524 #if defined(SOCK_NONBLOCK)
3525         host_type |= SOCK_NONBLOCK;
3526 #elif !defined(O_NONBLOCK)
3527         return -TARGET_EINVAL;
3528 #endif
3529     }
3530     *type = host_type;
3531     return 0;
3532 }
3533 
3534 /* Try to emulate socket type flags after socket creation.  */
3535 static int sock_flags_fixup(int fd, int target_type)
3536 {
3537 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3538     if (target_type & TARGET_SOCK_NONBLOCK) {
3539         int flags = fcntl(fd, F_GETFL);
3540         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3541             close(fd);
3542             return -TARGET_EINVAL;
3543         }
3544     }
3545 #endif
3546     return fd;
3547 }
3548 
3549 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3550                                                abi_ulong target_addr,
3551                                                socklen_t len)
3552 {
3553     struct sockaddr *addr = host_addr;
3554     struct target_sockaddr *target_saddr;
3555 
3556     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3557     if (!target_saddr) {
3558         return -TARGET_EFAULT;
3559     }
3560 
3561     memcpy(addr, target_saddr, len);
3562     addr->sa_family = tswap16(target_saddr->sa_family);
3563     /* spkt_protocol is big-endian */
3564 
3565     unlock_user(target_saddr, target_addr, 0);
3566     return 0;
3567 }
3568 
3569 static TargetFdTrans target_packet_trans = {
3570     .target_to_host_addr = packet_target_to_host_sockaddr,
3571 };
3572 
3573 #ifdef CONFIG_RTNETLINK
3574 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3575 {
3576     abi_long ret;
3577 
3578     ret = target_to_host_nlmsg_route(buf, len);
3579     if (ret < 0) {
3580         return ret;
3581     }
3582 
3583     return len;
3584 }
3585 
3586 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3587 {
3588     abi_long ret;
3589 
3590     ret = host_to_target_nlmsg_route(buf, len);
3591     if (ret < 0) {
3592         return ret;
3593     }
3594 
3595     return len;
3596 }
3597 
3598 static TargetFdTrans target_netlink_route_trans = {
3599     .target_to_host_data = netlink_route_target_to_host,
3600     .host_to_target_data = netlink_route_host_to_target,
3601 };
3602 #endif /* CONFIG_RTNETLINK */
3603 
3604 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3605 {
3606     abi_long ret;
3607 
3608     ret = target_to_host_nlmsg_audit(buf, len);
3609     if (ret < 0) {
3610         return ret;
3611     }
3612 
3613     return len;
3614 }
3615 
3616 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3617 {
3618     abi_long ret;
3619 
3620     ret = host_to_target_nlmsg_audit(buf, len);
3621     if (ret < 0) {
3622         return ret;
3623     }
3624 
3625     return len;
3626 }
3627 
3628 static TargetFdTrans target_netlink_audit_trans = {
3629     .target_to_host_data = netlink_audit_target_to_host,
3630     .host_to_target_data = netlink_audit_host_to_target,
3631 };
3632 
3633 /* do_socket() Must return target values and target errnos. */
3634 static abi_long do_socket(int domain, int type, int protocol)
3635 {
3636     int target_type = type;
3637     int ret;
3638 
3639     ret = target_to_host_sock_type(&type);
3640     if (ret) {
3641         return ret;
3642     }
3643 
3644     if (domain == PF_NETLINK && !(
3645 #ifdef CONFIG_RTNETLINK
3646          protocol == NETLINK_ROUTE ||
3647 #endif
3648          protocol == NETLINK_KOBJECT_UEVENT ||
3649          protocol == NETLINK_AUDIT)) {
3650         return -EPFNOSUPPORT;
3651     }
3652 
3653     if (domain == AF_PACKET ||
3654         (domain == AF_INET && type == SOCK_PACKET)) {
3655         protocol = tswap16(protocol);
3656     }
3657 
3658     ret = get_errno(socket(domain, type, protocol));
3659     if (ret >= 0) {
3660         ret = sock_flags_fixup(ret, target_type);
3661         if (type == SOCK_PACKET) {
3662             /* Manage an obsolete case :
3663              * if socket type is SOCK_PACKET, bind by name
3664              */
3665             fd_trans_register(ret, &target_packet_trans);
3666         } else if (domain == PF_NETLINK) {
3667             switch (protocol) {
3668 #ifdef CONFIG_RTNETLINK
3669             case NETLINK_ROUTE:
3670                 fd_trans_register(ret, &target_netlink_route_trans);
3671                 break;
3672 #endif
3673             case NETLINK_KOBJECT_UEVENT:
3674                 /* nothing to do: messages are strings */
3675                 break;
3676             case NETLINK_AUDIT:
3677                 fd_trans_register(ret, &target_netlink_audit_trans);
3678                 break;
3679             default:
3680                 g_assert_not_reached();
3681             }
3682         }
3683     }
3684     return ret;
3685 }
3686 
3687 /* do_bind() Must return target values and target errnos. */
3688 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3689                         socklen_t addrlen)
3690 {
3691     void *addr;
3692     abi_long ret;
3693 
3694     if ((int)addrlen < 0) {
3695         return -TARGET_EINVAL;
3696     }
3697 
3698     addr = alloca(addrlen+1);
3699 
3700     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3701     if (ret)
3702         return ret;
3703 
3704     return get_errno(bind(sockfd, addr, addrlen));
3705 }
3706 
3707 /* do_connect() Must return target values and target errnos. */
3708 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3709                            socklen_t addrlen)
3710 {
3711     void *addr;
3712     abi_long ret;
3713 
3714     if ((int)addrlen < 0) {
3715         return -TARGET_EINVAL;
3716     }
3717 
3718     addr = alloca(addrlen+1);
3719 
3720     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3721     if (ret)
3722         return ret;
3723 
3724     return get_errno(safe_connect(sockfd, addr, addrlen));
3725 }
3726 
3727 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3728 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3729                                       int flags, int send)
3730 {
3731     abi_long ret, len;
3732     struct msghdr msg;
3733     abi_ulong count;
3734     struct iovec *vec;
3735     abi_ulong target_vec;
3736 
3737     if (msgp->msg_name) {
3738         msg.msg_namelen = tswap32(msgp->msg_namelen);
3739         msg.msg_name = alloca(msg.msg_namelen+1);
3740         ret = target_to_host_sockaddr(fd, msg.msg_name,
3741                                       tswapal(msgp->msg_name),
3742                                       msg.msg_namelen);
3743         if (ret == -TARGET_EFAULT) {
3744             /* For connected sockets msg_name and msg_namelen must
3745              * be ignored, so returning EFAULT immediately is wrong.
3746              * Instead, pass a bad msg_name to the host kernel, and
3747              * let it decide whether to return EFAULT or not.
3748              */
3749             msg.msg_name = (void *)-1;
3750         } else if (ret) {
3751             goto out2;
3752         }
3753     } else {
3754         msg.msg_name = NULL;
3755         msg.msg_namelen = 0;
3756     }
3757     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3758     msg.msg_control = alloca(msg.msg_controllen);
3759     msg.msg_flags = tswap32(msgp->msg_flags);
3760 
3761     count = tswapal(msgp->msg_iovlen);
3762     target_vec = tswapal(msgp->msg_iov);
3763 
3764     if (count > IOV_MAX) {
3765         /* sendrcvmsg returns a different errno for this condition than
3766          * readv/writev, so we must catch it here before lock_iovec() does.
3767          */
3768         ret = -TARGET_EMSGSIZE;
3769         goto out2;
3770     }
3771 
3772     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3773                      target_vec, count, send);
3774     if (vec == NULL) {
3775         ret = -host_to_target_errno(errno);
3776         goto out2;
3777     }
3778     msg.msg_iovlen = count;
3779     msg.msg_iov = vec;
3780 
3781     if (send) {
3782         if (fd_trans_target_to_host_data(fd)) {
3783             void *host_msg;
3784 
3785             host_msg = g_malloc(msg.msg_iov->iov_len);
3786             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3787             ret = fd_trans_target_to_host_data(fd)(host_msg,
3788                                                    msg.msg_iov->iov_len);
3789             if (ret >= 0) {
3790                 msg.msg_iov->iov_base = host_msg;
3791                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3792             }
3793             g_free(host_msg);
3794         } else {
3795             ret = target_to_host_cmsg(&msg, msgp);
3796             if (ret == 0) {
3797                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3798             }
3799         }
3800     } else {
3801         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3802         if (!is_error(ret)) {
3803             len = ret;
3804             if (fd_trans_host_to_target_data(fd)) {
3805                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3806                                                        len);
3807             } else {
3808                 ret = host_to_target_cmsg(msgp, &msg);
3809             }
3810             if (!is_error(ret)) {
3811                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3812                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3813                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3814                                     msg.msg_name, msg.msg_namelen);
3815                     if (ret) {
3816                         goto out;
3817                     }
3818                 }
3819 
3820                 ret = len;
3821             }
3822         }
3823     }
3824 
3825 out:
3826     unlock_iovec(vec, target_vec, count, !send);
3827 out2:
3828     return ret;
3829 }
3830 
3831 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3832                                int flags, int send)
3833 {
3834     abi_long ret;
3835     struct target_msghdr *msgp;
3836 
3837     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3838                           msgp,
3839                           target_msg,
3840                           send ? 1 : 0)) {
3841         return -TARGET_EFAULT;
3842     }
3843     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3844     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3845     return ret;
3846 }
3847 
3848 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3849  * so it might not have this *mmsg-specific flag either.
3850  */
3851 #ifndef MSG_WAITFORONE
3852 #define MSG_WAITFORONE 0x10000
3853 #endif
3854 
3855 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3856                                 unsigned int vlen, unsigned int flags,
3857                                 int send)
3858 {
3859     struct target_mmsghdr *mmsgp;
3860     abi_long ret = 0;
3861     int i;
3862 
3863     if (vlen > UIO_MAXIOV) {
3864         vlen = UIO_MAXIOV;
3865     }
3866 
3867     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3868     if (!mmsgp) {
3869         return -TARGET_EFAULT;
3870     }
3871 
3872     for (i = 0; i < vlen; i++) {
3873         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3874         if (is_error(ret)) {
3875             break;
3876         }
3877         mmsgp[i].msg_len = tswap32(ret);
3878         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3879         if (flags & MSG_WAITFORONE) {
3880             flags |= MSG_DONTWAIT;
3881         }
3882     }
3883 
3884     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3885 
3886     /* Return number of datagrams sent if we sent any at all;
3887      * otherwise return the error.
3888      */
3889     if (i) {
3890         return i;
3891     }
3892     return ret;
3893 }
3894 
3895 /* do_accept4() Must return target values and target errnos. */
3896 static abi_long do_accept4(int fd, abi_ulong target_addr,
3897                            abi_ulong target_addrlen_addr, int flags)
3898 {
3899     socklen_t addrlen;
3900     void *addr;
3901     abi_long ret;
3902     int host_flags;
3903 
3904     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3905 
3906     if (target_addr == 0) {
3907         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3908     }
3909 
3910     /* linux returns EINVAL if addrlen pointer is invalid */
3911     if (get_user_u32(addrlen, target_addrlen_addr))
3912         return -TARGET_EINVAL;
3913 
3914     if ((int)addrlen < 0) {
3915         return -TARGET_EINVAL;
3916     }
3917 
3918     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3919         return -TARGET_EINVAL;
3920 
3921     addr = alloca(addrlen);
3922 
3923     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3924     if (!is_error(ret)) {
3925         host_to_target_sockaddr(target_addr, addr, addrlen);
3926         if (put_user_u32(addrlen, target_addrlen_addr))
3927             ret = -TARGET_EFAULT;
3928     }
3929     return ret;
3930 }
3931 
3932 /* do_getpeername() Must return target values and target errnos. */
3933 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3934                                abi_ulong target_addrlen_addr)
3935 {
3936     socklen_t addrlen;
3937     void *addr;
3938     abi_long ret;
3939 
3940     if (get_user_u32(addrlen, target_addrlen_addr))
3941         return -TARGET_EFAULT;
3942 
3943     if ((int)addrlen < 0) {
3944         return -TARGET_EINVAL;
3945     }
3946 
3947     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3948         return -TARGET_EFAULT;
3949 
3950     addr = alloca(addrlen);
3951 
3952     ret = get_errno(getpeername(fd, addr, &addrlen));
3953     if (!is_error(ret)) {
3954         host_to_target_sockaddr(target_addr, addr, addrlen);
3955         if (put_user_u32(addrlen, target_addrlen_addr))
3956             ret = -TARGET_EFAULT;
3957     }
3958     return ret;
3959 }
3960 
3961 /* do_getsockname() Must return target values and target errnos. */
3962 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3963                                abi_ulong target_addrlen_addr)
3964 {
3965     socklen_t addrlen;
3966     void *addr;
3967     abi_long ret;
3968 
3969     if (get_user_u32(addrlen, target_addrlen_addr))
3970         return -TARGET_EFAULT;
3971 
3972     if ((int)addrlen < 0) {
3973         return -TARGET_EINVAL;
3974     }
3975 
3976     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3977         return -TARGET_EFAULT;
3978 
3979     addr = alloca(addrlen);
3980 
3981     ret = get_errno(getsockname(fd, addr, &addrlen));
3982     if (!is_error(ret)) {
3983         host_to_target_sockaddr(target_addr, addr, addrlen);
3984         if (put_user_u32(addrlen, target_addrlen_addr))
3985             ret = -TARGET_EFAULT;
3986     }
3987     return ret;
3988 }
3989 
3990 /* do_socketpair() Must return target values and target errnos. */
3991 static abi_long do_socketpair(int domain, int type, int protocol,
3992                               abi_ulong target_tab_addr)
3993 {
3994     int tab[2];
3995     abi_long ret;
3996 
3997     target_to_host_sock_type(&type);
3998 
3999     ret = get_errno(socketpair(domain, type, protocol, tab));
4000     if (!is_error(ret)) {
4001         if (put_user_s32(tab[0], target_tab_addr)
4002             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4003             ret = -TARGET_EFAULT;
4004     }
4005     return ret;
4006 }
4007 
4008 /* do_sendto() Must return target values and target errnos. */
4009 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4010                           abi_ulong target_addr, socklen_t addrlen)
4011 {
4012     void *addr;
4013     void *host_msg;
4014     void *copy_msg = NULL;
4015     abi_long ret;
4016 
4017     if ((int)addrlen < 0) {
4018         return -TARGET_EINVAL;
4019     }
4020 
4021     host_msg = lock_user(VERIFY_READ, msg, len, 1);
4022     if (!host_msg)
4023         return -TARGET_EFAULT;
4024     if (fd_trans_target_to_host_data(fd)) {
4025         copy_msg = host_msg;
4026         host_msg = g_malloc(len);
4027         memcpy(host_msg, copy_msg, len);
4028         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4029         if (ret < 0) {
4030             goto fail;
4031         }
4032     }
4033     if (target_addr) {
4034         addr = alloca(addrlen+1);
4035         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4036         if (ret) {
4037             goto fail;
4038         }
4039         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4040     } else {
4041         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4042     }
4043 fail:
4044     if (copy_msg) {
4045         g_free(host_msg);
4046         host_msg = copy_msg;
4047     }
4048     unlock_user(host_msg, msg, 0);
4049     return ret;
4050 }
4051 
4052 /* do_recvfrom() Must return target values and target errnos. */
4053 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4054                             abi_ulong target_addr,
4055                             abi_ulong target_addrlen)
4056 {
4057     socklen_t addrlen;
4058     void *addr;
4059     void *host_msg;
4060     abi_long ret;
4061 
4062     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4063     if (!host_msg)
4064         return -TARGET_EFAULT;
4065     if (target_addr) {
4066         if (get_user_u32(addrlen, target_addrlen)) {
4067             ret = -TARGET_EFAULT;
4068             goto fail;
4069         }
4070         if ((int)addrlen < 0) {
4071             ret = -TARGET_EINVAL;
4072             goto fail;
4073         }
4074         addr = alloca(addrlen);
4075         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4076                                       addr, &addrlen));
4077     } else {
4078         addr = NULL; /* To keep compiler quiet.  */
4079         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4080     }
4081     if (!is_error(ret)) {
4082         if (fd_trans_host_to_target_data(fd)) {
4083             ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4084         }
4085         if (target_addr) {
4086             host_to_target_sockaddr(target_addr, addr, addrlen);
4087             if (put_user_u32(addrlen, target_addrlen)) {
4088                 ret = -TARGET_EFAULT;
4089                 goto fail;
4090             }
4091         }
4092         unlock_user(host_msg, msg, len);
4093     } else {
4094 fail:
4095         unlock_user(host_msg, msg, 0);
4096     }
4097     return ret;
4098 }
4099 
4100 #ifdef TARGET_NR_socketcall
4101 /* do_socketcall() must return target values and target errnos. */
4102 static abi_long do_socketcall(int num, abi_ulong vptr)
4103 {
4104     static const unsigned nargs[] = { /* number of arguments per operation */
4105         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
4106         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
4107         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
4108         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
4109         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
4110         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4111         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4112         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
4113         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
4114         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
4115         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
4116         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
4117         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
4118         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4119         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4120         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
4121         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
4122         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
4123         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
4124         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
4125     };
4126     abi_long a[6]; /* max 6 args */
4127     unsigned i;
4128 
4129     /* check the range of the first argument num */
4130     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4131     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4132         return -TARGET_EINVAL;
4133     }
4134     /* ensure we have space for args */
4135     if (nargs[num] > ARRAY_SIZE(a)) {
4136         return -TARGET_EINVAL;
4137     }
4138     /* collect the arguments in a[] according to nargs[] */
4139     for (i = 0; i < nargs[num]; ++i) {
4140         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4141             return -TARGET_EFAULT;
4142         }
4143     }
4144     /* now when we have the args, invoke the appropriate underlying function */
4145     switch (num) {
4146     case TARGET_SYS_SOCKET: /* domain, type, protocol */
4147         return do_socket(a[0], a[1], a[2]);
4148     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4149         return do_bind(a[0], a[1], a[2]);
4150     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4151         return do_connect(a[0], a[1], a[2]);
4152     case TARGET_SYS_LISTEN: /* sockfd, backlog */
4153         return get_errno(listen(a[0], a[1]));
4154     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4155         return do_accept4(a[0], a[1], a[2], 0);
4156     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4157         return do_getsockname(a[0], a[1], a[2]);
4158     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4159         return do_getpeername(a[0], a[1], a[2]);
4160     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4161         return do_socketpair(a[0], a[1], a[2], a[3]);
4162     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4163         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4164     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4165         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4166     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4167         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4168     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4169         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4170     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4171         return get_errno(shutdown(a[0], a[1]));
4172     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4173         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4174     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4175         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4176     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4177         return do_sendrecvmsg(a[0], a[1], a[2], 1);
4178     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4179         return do_sendrecvmsg(a[0], a[1], a[2], 0);
4180     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4181         return do_accept4(a[0], a[1], a[2], a[3]);
4182     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4183         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4184     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4185         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4186     default:
4187         gemu_log("Unsupported socketcall: %d\n", num);
4188         return -TARGET_EINVAL;
4189     }
4190 }
4191 #endif
4192 
4193 #define N_SHM_REGIONS	32
4194 
4195 static struct shm_region {
4196     abi_ulong start;
4197     abi_ulong size;
4198     bool in_use;
4199 } shm_regions[N_SHM_REGIONS];
4200 
4201 #ifndef TARGET_SEMID64_DS
4202 /* asm-generic version of this struct */
4203 struct target_semid64_ds
4204 {
4205   struct target_ipc_perm sem_perm;
4206   abi_ulong sem_otime;
4207 #if TARGET_ABI_BITS == 32
4208   abi_ulong __unused1;
4209 #endif
4210   abi_ulong sem_ctime;
4211 #if TARGET_ABI_BITS == 32
4212   abi_ulong __unused2;
4213 #endif
4214   abi_ulong sem_nsems;
4215   abi_ulong __unused3;
4216   abi_ulong __unused4;
4217 };
4218 #endif
4219 
4220 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4221                                                abi_ulong target_addr)
4222 {
4223     struct target_ipc_perm *target_ip;
4224     struct target_semid64_ds *target_sd;
4225 
4226     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4227         return -TARGET_EFAULT;
4228     target_ip = &(target_sd->sem_perm);
4229     host_ip->__key = tswap32(target_ip->__key);
4230     host_ip->uid = tswap32(target_ip->uid);
4231     host_ip->gid = tswap32(target_ip->gid);
4232     host_ip->cuid = tswap32(target_ip->cuid);
4233     host_ip->cgid = tswap32(target_ip->cgid);
4234 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4235     host_ip->mode = tswap32(target_ip->mode);
4236 #else
4237     host_ip->mode = tswap16(target_ip->mode);
4238 #endif
4239 #if defined(TARGET_PPC)
4240     host_ip->__seq = tswap32(target_ip->__seq);
4241 #else
4242     host_ip->__seq = tswap16(target_ip->__seq);
4243 #endif
4244     unlock_user_struct(target_sd, target_addr, 0);
4245     return 0;
4246 }
4247 
4248 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4249                                                struct ipc_perm *host_ip)
4250 {
4251     struct target_ipc_perm *target_ip;
4252     struct target_semid64_ds *target_sd;
4253 
4254     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4255         return -TARGET_EFAULT;
4256     target_ip = &(target_sd->sem_perm);
4257     target_ip->__key = tswap32(host_ip->__key);
4258     target_ip->uid = tswap32(host_ip->uid);
4259     target_ip->gid = tswap32(host_ip->gid);
4260     target_ip->cuid = tswap32(host_ip->cuid);
4261     target_ip->cgid = tswap32(host_ip->cgid);
4262 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4263     target_ip->mode = tswap32(host_ip->mode);
4264 #else
4265     target_ip->mode = tswap16(host_ip->mode);
4266 #endif
4267 #if defined(TARGET_PPC)
4268     target_ip->__seq = tswap32(host_ip->__seq);
4269 #else
4270     target_ip->__seq = tswap16(host_ip->__seq);
4271 #endif
4272     unlock_user_struct(target_sd, target_addr, 1);
4273     return 0;
4274 }
4275 
4276 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4277                                                abi_ulong target_addr)
4278 {
4279     struct target_semid64_ds *target_sd;
4280 
4281     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4282         return -TARGET_EFAULT;
4283     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4284         return -TARGET_EFAULT;
4285     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4286     host_sd->sem_otime = tswapal(target_sd->sem_otime);
4287     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4288     unlock_user_struct(target_sd, target_addr, 0);
4289     return 0;
4290 }
4291 
4292 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4293                                                struct semid_ds *host_sd)
4294 {
4295     struct target_semid64_ds *target_sd;
4296 
4297     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4298         return -TARGET_EFAULT;
4299     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4300         return -TARGET_EFAULT;
4301     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4302     target_sd->sem_otime = tswapal(host_sd->sem_otime);
4303     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4304     unlock_user_struct(target_sd, target_addr, 1);
4305     return 0;
4306 }
4307 
4308 struct target_seminfo {
4309     int semmap;
4310     int semmni;
4311     int semmns;
4312     int semmnu;
4313     int semmsl;
4314     int semopm;
4315     int semume;
4316     int semusz;
4317     int semvmx;
4318     int semaem;
4319 };
4320 
4321 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4322                                               struct seminfo *host_seminfo)
4323 {
4324     struct target_seminfo *target_seminfo;
4325     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4326         return -TARGET_EFAULT;
4327     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4328     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4329     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4330     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4331     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4332     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4333     __put_user(host_seminfo->semume, &target_seminfo->semume);
4334     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4335     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4336     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4337     unlock_user_struct(target_seminfo, target_addr, 1);
4338     return 0;
4339 }
4340 
4341 union semun {
4342 	int val;
4343 	struct semid_ds *buf;
4344 	unsigned short *array;
4345 	struct seminfo *__buf;
4346 };
4347 
4348 union target_semun {
4349 	int val;
4350 	abi_ulong buf;
4351 	abi_ulong array;
4352 	abi_ulong __buf;
4353 };
4354 
4355 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4356                                                abi_ulong target_addr)
4357 {
4358     int nsems;
4359     unsigned short *array;
4360     union semun semun;
4361     struct semid_ds semid_ds;
4362     int i, ret;
4363 
4364     semun.buf = &semid_ds;
4365 
4366     ret = semctl(semid, 0, IPC_STAT, semun);
4367     if (ret == -1)
4368         return get_errno(ret);
4369 
4370     nsems = semid_ds.sem_nsems;
4371 
4372     *host_array = g_try_new(unsigned short, nsems);
4373     if (!*host_array) {
4374         return -TARGET_ENOMEM;
4375     }
4376     array = lock_user(VERIFY_READ, target_addr,
4377                       nsems*sizeof(unsigned short), 1);
4378     if (!array) {
4379         g_free(*host_array);
4380         return -TARGET_EFAULT;
4381     }
4382 
4383     for(i=0; i<nsems; i++) {
4384         __get_user((*host_array)[i], &array[i]);
4385     }
4386     unlock_user(array, target_addr, 0);
4387 
4388     return 0;
4389 }
4390 
4391 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4392                                                unsigned short **host_array)
4393 {
4394     int nsems;
4395     unsigned short *array;
4396     union semun semun;
4397     struct semid_ds semid_ds;
4398     int i, ret;
4399 
4400     semun.buf = &semid_ds;
4401 
4402     ret = semctl(semid, 0, IPC_STAT, semun);
4403     if (ret == -1)
4404         return get_errno(ret);
4405 
4406     nsems = semid_ds.sem_nsems;
4407 
4408     array = lock_user(VERIFY_WRITE, target_addr,
4409                       nsems*sizeof(unsigned short), 0);
4410     if (!array)
4411         return -TARGET_EFAULT;
4412 
4413     for(i=0; i<nsems; i++) {
4414         __put_user((*host_array)[i], &array[i]);
4415     }
4416     g_free(*host_array);
4417     unlock_user(array, target_addr, 1);
4418 
4419     return 0;
4420 }
4421 
4422 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4423                                  abi_ulong target_arg)
4424 {
4425     union target_semun target_su = { .buf = target_arg };
4426     union semun arg;
4427     struct semid_ds dsarg;
4428     unsigned short *array = NULL;
4429     struct seminfo seminfo;
4430     abi_long ret = -TARGET_EINVAL;
4431     abi_long err;
4432     cmd &= 0xff;
4433 
4434     switch( cmd ) {
4435 	case GETVAL:
4436 	case SETVAL:
4437             /* In 64 bit cross-endian situations, we will erroneously pick up
4438              * the wrong half of the union for the "val" element.  To rectify
4439              * this, the entire 8-byte structure is byteswapped, followed by
4440 	     * a swap of the 4 byte val field. In other cases, the data is
4441 	     * already in proper host byte order. */
4442 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4443 		target_su.buf = tswapal(target_su.buf);
4444 		arg.val = tswap32(target_su.val);
4445 	    } else {
4446 		arg.val = target_su.val;
4447 	    }
4448             ret = get_errno(semctl(semid, semnum, cmd, arg));
4449             break;
4450 	case GETALL:
4451 	case SETALL:
4452             err = target_to_host_semarray(semid, &array, target_su.array);
4453             if (err)
4454                 return err;
4455             arg.array = array;
4456             ret = get_errno(semctl(semid, semnum, cmd, arg));
4457             err = host_to_target_semarray(semid, target_su.array, &array);
4458             if (err)
4459                 return err;
4460             break;
4461 	case IPC_STAT:
4462 	case IPC_SET:
4463 	case SEM_STAT:
4464             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4465             if (err)
4466                 return err;
4467             arg.buf = &dsarg;
4468             ret = get_errno(semctl(semid, semnum, cmd, arg));
4469             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4470             if (err)
4471                 return err;
4472             break;
4473 	case IPC_INFO:
4474 	case SEM_INFO:
4475             arg.__buf = &seminfo;
4476             ret = get_errno(semctl(semid, semnum, cmd, arg));
4477             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4478             if (err)
4479                 return err;
4480             break;
4481 	case IPC_RMID:
4482 	case GETPID:
4483 	case GETNCNT:
4484 	case GETZCNT:
4485             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4486             break;
4487     }
4488 
4489     return ret;
4490 }
4491 
4492 struct target_sembuf {
4493     unsigned short sem_num;
4494     short sem_op;
4495     short sem_flg;
4496 };
4497 
4498 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4499                                              abi_ulong target_addr,
4500                                              unsigned nsops)
4501 {
4502     struct target_sembuf *target_sembuf;
4503     int i;
4504 
4505     target_sembuf = lock_user(VERIFY_READ, target_addr,
4506                               nsops*sizeof(struct target_sembuf), 1);
4507     if (!target_sembuf)
4508         return -TARGET_EFAULT;
4509 
4510     for(i=0; i<nsops; i++) {
4511         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4512         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4513         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4514     }
4515 
4516     unlock_user(target_sembuf, target_addr, 0);
4517 
4518     return 0;
4519 }
4520 
4521 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4522 {
4523     struct sembuf sops[nsops];
4524 
4525     if (target_to_host_sembuf(sops, ptr, nsops))
4526         return -TARGET_EFAULT;
4527 
4528     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4529 }
4530 
4531 struct target_msqid_ds
4532 {
4533     struct target_ipc_perm msg_perm;
4534     abi_ulong msg_stime;
4535 #if TARGET_ABI_BITS == 32
4536     abi_ulong __unused1;
4537 #endif
4538     abi_ulong msg_rtime;
4539 #if TARGET_ABI_BITS == 32
4540     abi_ulong __unused2;
4541 #endif
4542     abi_ulong msg_ctime;
4543 #if TARGET_ABI_BITS == 32
4544     abi_ulong __unused3;
4545 #endif
4546     abi_ulong __msg_cbytes;
4547     abi_ulong msg_qnum;
4548     abi_ulong msg_qbytes;
4549     abi_ulong msg_lspid;
4550     abi_ulong msg_lrpid;
4551     abi_ulong __unused4;
4552     abi_ulong __unused5;
4553 };
4554 
4555 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4556                                                abi_ulong target_addr)
4557 {
4558     struct target_msqid_ds *target_md;
4559 
4560     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4561         return -TARGET_EFAULT;
4562     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4563         return -TARGET_EFAULT;
4564     host_md->msg_stime = tswapal(target_md->msg_stime);
4565     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4566     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4567     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4568     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4569     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4570     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4571     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4572     unlock_user_struct(target_md, target_addr, 0);
4573     return 0;
4574 }
4575 
4576 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4577                                                struct msqid_ds *host_md)
4578 {
4579     struct target_msqid_ds *target_md;
4580 
4581     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4582         return -TARGET_EFAULT;
4583     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4584         return -TARGET_EFAULT;
4585     target_md->msg_stime = tswapal(host_md->msg_stime);
4586     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4587     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4588     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4589     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4590     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4591     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4592     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4593     unlock_user_struct(target_md, target_addr, 1);
4594     return 0;
4595 }
4596 
4597 struct target_msginfo {
4598     int msgpool;
4599     int msgmap;
4600     int msgmax;
4601     int msgmnb;
4602     int msgmni;
4603     int msgssz;
4604     int msgtql;
4605     unsigned short int msgseg;
4606 };
4607 
4608 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4609                                               struct msginfo *host_msginfo)
4610 {
4611     struct target_msginfo *target_msginfo;
4612     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4613         return -TARGET_EFAULT;
4614     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4615     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4616     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4617     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4618     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4619     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4620     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4621     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4622     unlock_user_struct(target_msginfo, target_addr, 1);
4623     return 0;
4624 }
4625 
4626 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4627 {
4628     struct msqid_ds dsarg;
4629     struct msginfo msginfo;
4630     abi_long ret = -TARGET_EINVAL;
4631 
4632     cmd &= 0xff;
4633 
4634     switch (cmd) {
4635     case IPC_STAT:
4636     case IPC_SET:
4637     case MSG_STAT:
4638         if (target_to_host_msqid_ds(&dsarg,ptr))
4639             return -TARGET_EFAULT;
4640         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4641         if (host_to_target_msqid_ds(ptr,&dsarg))
4642             return -TARGET_EFAULT;
4643         break;
4644     case IPC_RMID:
4645         ret = get_errno(msgctl(msgid, cmd, NULL));
4646         break;
4647     case IPC_INFO:
4648     case MSG_INFO:
4649         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4650         if (host_to_target_msginfo(ptr, &msginfo))
4651             return -TARGET_EFAULT;
4652         break;
4653     }
4654 
4655     return ret;
4656 }
4657 
4658 struct target_msgbuf {
4659     abi_long mtype;
4660     char	mtext[1];
4661 };
4662 
4663 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4664                                  ssize_t msgsz, int msgflg)
4665 {
4666     struct target_msgbuf *target_mb;
4667     struct msgbuf *host_mb;
4668     abi_long ret = 0;
4669 
4670     if (msgsz < 0) {
4671         return -TARGET_EINVAL;
4672     }
4673 
4674     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4675         return -TARGET_EFAULT;
4676     host_mb = g_try_malloc(msgsz + sizeof(long));
4677     if (!host_mb) {
4678         unlock_user_struct(target_mb, msgp, 0);
4679         return -TARGET_ENOMEM;
4680     }
4681     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4682     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4683     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4684     g_free(host_mb);
4685     unlock_user_struct(target_mb, msgp, 0);
4686 
4687     return ret;
4688 }
4689 
4690 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4691                                  ssize_t msgsz, abi_long msgtyp,
4692                                  int msgflg)
4693 {
4694     struct target_msgbuf *target_mb;
4695     char *target_mtext;
4696     struct msgbuf *host_mb;
4697     abi_long ret = 0;
4698 
4699     if (msgsz < 0) {
4700         return -TARGET_EINVAL;
4701     }
4702 
4703     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4704         return -TARGET_EFAULT;
4705 
4706     host_mb = g_try_malloc(msgsz + sizeof(long));
4707     if (!host_mb) {
4708         ret = -TARGET_ENOMEM;
4709         goto end;
4710     }
4711     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4712 
4713     if (ret > 0) {
4714         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4715         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4716         if (!target_mtext) {
4717             ret = -TARGET_EFAULT;
4718             goto end;
4719         }
4720         memcpy(target_mb->mtext, host_mb->mtext, ret);
4721         unlock_user(target_mtext, target_mtext_addr, ret);
4722     }
4723 
4724     target_mb->mtype = tswapal(host_mb->mtype);
4725 
4726 end:
4727     if (target_mb)
4728         unlock_user_struct(target_mb, msgp, 1);
4729     g_free(host_mb);
4730     return ret;
4731 }
4732 
4733 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4734                                                abi_ulong target_addr)
4735 {
4736     struct target_shmid_ds *target_sd;
4737 
4738     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4739         return -TARGET_EFAULT;
4740     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4741         return -TARGET_EFAULT;
4742     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4743     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4744     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4745     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4746     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4747     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4748     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4749     unlock_user_struct(target_sd, target_addr, 0);
4750     return 0;
4751 }
4752 
4753 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4754                                                struct shmid_ds *host_sd)
4755 {
4756     struct target_shmid_ds *target_sd;
4757 
4758     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4759         return -TARGET_EFAULT;
4760     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4761         return -TARGET_EFAULT;
4762     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4763     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4764     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4765     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4766     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4767     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4768     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4769     unlock_user_struct(target_sd, target_addr, 1);
4770     return 0;
4771 }
4772 
4773 struct  target_shminfo {
4774     abi_ulong shmmax;
4775     abi_ulong shmmin;
4776     abi_ulong shmmni;
4777     abi_ulong shmseg;
4778     abi_ulong shmall;
4779 };
4780 
4781 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4782                                               struct shminfo *host_shminfo)
4783 {
4784     struct target_shminfo *target_shminfo;
4785     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4786         return -TARGET_EFAULT;
4787     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4788     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4789     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4790     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4791     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4792     unlock_user_struct(target_shminfo, target_addr, 1);
4793     return 0;
4794 }
4795 
4796 struct target_shm_info {
4797     int used_ids;
4798     abi_ulong shm_tot;
4799     abi_ulong shm_rss;
4800     abi_ulong shm_swp;
4801     abi_ulong swap_attempts;
4802     abi_ulong swap_successes;
4803 };
4804 
4805 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4806                                                struct shm_info *host_shm_info)
4807 {
4808     struct target_shm_info *target_shm_info;
4809     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4810         return -TARGET_EFAULT;
4811     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4812     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4813     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4814     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4815     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4816     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4817     unlock_user_struct(target_shm_info, target_addr, 1);
4818     return 0;
4819 }
4820 
4821 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4822 {
4823     struct shmid_ds dsarg;
4824     struct shminfo shminfo;
4825     struct shm_info shm_info;
4826     abi_long ret = -TARGET_EINVAL;
4827 
4828     cmd &= 0xff;
4829 
4830     switch(cmd) {
4831     case IPC_STAT:
4832     case IPC_SET:
4833     case SHM_STAT:
4834         if (target_to_host_shmid_ds(&dsarg, buf))
4835             return -TARGET_EFAULT;
4836         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4837         if (host_to_target_shmid_ds(buf, &dsarg))
4838             return -TARGET_EFAULT;
4839         break;
4840     case IPC_INFO:
4841         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4842         if (host_to_target_shminfo(buf, &shminfo))
4843             return -TARGET_EFAULT;
4844         break;
4845     case SHM_INFO:
4846         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4847         if (host_to_target_shm_info(buf, &shm_info))
4848             return -TARGET_EFAULT;
4849         break;
4850     case IPC_RMID:
4851     case SHM_LOCK:
4852     case SHM_UNLOCK:
4853         ret = get_errno(shmctl(shmid, cmd, NULL));
4854         break;
4855     }
4856 
4857     return ret;
4858 }
4859 
4860 #ifndef TARGET_FORCE_SHMLBA
4861 /* For most architectures, SHMLBA is the same as the page size;
4862  * some architectures have larger values, in which case they should
4863  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4864  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4865  * and defining its own value for SHMLBA.
4866  *
4867  * The kernel also permits SHMLBA to be set by the architecture to a
4868  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4869  * this means that addresses are rounded to the large size if
4870  * SHM_RND is set but addresses not aligned to that size are not rejected
4871  * as long as they are at least page-aligned. Since the only architecture
4872  * which uses this is ia64 this code doesn't provide for that oddity.
4873  */
4874 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4875 {
4876     return TARGET_PAGE_SIZE;
4877 }
4878 #endif
4879 
4880 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4881                                  int shmid, abi_ulong shmaddr, int shmflg)
4882 {
4883     abi_long raddr;
4884     void *host_raddr;
4885     struct shmid_ds shm_info;
4886     int i,ret;
4887     abi_ulong shmlba;
4888 
4889     /* find out the length of the shared memory segment */
4890     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4891     if (is_error(ret)) {
4892         /* can't get length, bail out */
4893         return ret;
4894     }
4895 
4896     shmlba = target_shmlba(cpu_env);
4897 
4898     if (shmaddr & (shmlba - 1)) {
4899         if (shmflg & SHM_RND) {
4900             shmaddr &= ~(shmlba - 1);
4901         } else {
4902             return -TARGET_EINVAL;
4903         }
4904     }
4905     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4906         return -TARGET_EINVAL;
4907     }
4908 
4909     mmap_lock();
4910 
4911     if (shmaddr)
4912         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4913     else {
4914         abi_ulong mmap_start;
4915 
4916         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4917 
4918         if (mmap_start == -1) {
4919             errno = ENOMEM;
4920             host_raddr = (void *)-1;
4921         } else
4922             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4923     }
4924 
4925     if (host_raddr == (void *)-1) {
4926         mmap_unlock();
4927         return get_errno((long)host_raddr);
4928     }
4929     raddr=h2g((unsigned long)host_raddr);
4930 
4931     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4932                    PAGE_VALID | PAGE_READ |
4933                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4934 
4935     for (i = 0; i < N_SHM_REGIONS; i++) {
4936         if (!shm_regions[i].in_use) {
4937             shm_regions[i].in_use = true;
4938             shm_regions[i].start = raddr;
4939             shm_regions[i].size = shm_info.shm_segsz;
4940             break;
4941         }
4942     }
4943 
4944     mmap_unlock();
4945     return raddr;
4946 
4947 }
4948 
4949 static inline abi_long do_shmdt(abi_ulong shmaddr)
4950 {
4951     int i;
4952     abi_long rv;
4953 
4954     mmap_lock();
4955 
4956     for (i = 0; i < N_SHM_REGIONS; ++i) {
4957         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4958             shm_regions[i].in_use = false;
4959             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4960             break;
4961         }
4962     }
4963     rv = get_errno(shmdt(g2h(shmaddr)));
4964 
4965     mmap_unlock();
4966 
4967     return rv;
4968 }
4969 
4970 #ifdef TARGET_NR_ipc
4971 /* ??? This only works with linear mappings.  */
4972 /* do_ipc() must return target values and target errnos. */
4973 static abi_long do_ipc(CPUArchState *cpu_env,
4974                        unsigned int call, abi_long first,
4975                        abi_long second, abi_long third,
4976                        abi_long ptr, abi_long fifth)
4977 {
4978     int version;
4979     abi_long ret = 0;
4980 
4981     version = call >> 16;
4982     call &= 0xffff;
4983 
4984     switch (call) {
4985     case IPCOP_semop:
4986         ret = do_semop(first, ptr, second);
4987         break;
4988 
4989     case IPCOP_semget:
4990         ret = get_errno(semget(first, second, third));
4991         break;
4992 
4993     case IPCOP_semctl: {
4994         /* The semun argument to semctl is passed by value, so dereference the
4995          * ptr argument. */
4996         abi_ulong atptr;
4997         get_user_ual(atptr, ptr);
4998         ret = do_semctl(first, second, third, atptr);
4999         break;
5000     }
5001 
5002     case IPCOP_msgget:
5003         ret = get_errno(msgget(first, second));
5004         break;
5005 
5006     case IPCOP_msgsnd:
5007         ret = do_msgsnd(first, ptr, second, third);
5008         break;
5009 
5010     case IPCOP_msgctl:
5011         ret = do_msgctl(first, second, ptr);
5012         break;
5013 
5014     case IPCOP_msgrcv:
5015         switch (version) {
5016         case 0:
5017             {
5018                 struct target_ipc_kludge {
5019                     abi_long msgp;
5020                     abi_long msgtyp;
5021                 } *tmp;
5022 
5023                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5024                     ret = -TARGET_EFAULT;
5025                     break;
5026                 }
5027 
5028                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5029 
5030                 unlock_user_struct(tmp, ptr, 0);
5031                 break;
5032             }
5033         default:
5034             ret = do_msgrcv(first, ptr, second, fifth, third);
5035         }
5036         break;
5037 
5038     case IPCOP_shmat:
5039         switch (version) {
5040         default:
5041         {
5042             abi_ulong raddr;
5043             raddr = do_shmat(cpu_env, first, ptr, second);
5044             if (is_error(raddr))
5045                 return get_errno(raddr);
5046             if (put_user_ual(raddr, third))
5047                 return -TARGET_EFAULT;
5048             break;
5049         }
5050         case 1:
5051             ret = -TARGET_EINVAL;
5052             break;
5053         }
5054 	break;
5055     case IPCOP_shmdt:
5056         ret = do_shmdt(ptr);
5057 	break;
5058 
5059     case IPCOP_shmget:
5060 	/* IPC_* flag values are the same on all linux platforms */
5061 	ret = get_errno(shmget(first, second, third));
5062 	break;
5063 
5064 	/* IPC_* and SHM_* command values are the same on all linux platforms */
5065     case IPCOP_shmctl:
5066         ret = do_shmctl(first, second, ptr);
5067         break;
5068     default:
5069 	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5070 	ret = -TARGET_ENOSYS;
5071 	break;
5072     }
5073     return ret;
5074 }
5075 #endif
5076 
5077 /* kernel structure types definitions */
5078 
5079 #define STRUCT(name, ...) STRUCT_ ## name,
5080 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5081 enum {
5082 #include "syscall_types.h"
5083 STRUCT_MAX
5084 };
5085 #undef STRUCT
5086 #undef STRUCT_SPECIAL
5087 
5088 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
5089 #define STRUCT_SPECIAL(name)
5090 #include "syscall_types.h"
5091 #undef STRUCT
5092 #undef STRUCT_SPECIAL
5093 
5094 typedef struct IOCTLEntry IOCTLEntry;
5095 
5096 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5097                              int fd, int cmd, abi_long arg);
5098 
5099 struct IOCTLEntry {
5100     int target_cmd;
5101     unsigned int host_cmd;
5102     const char *name;
5103     int access;
5104     do_ioctl_fn *do_ioctl;
5105     const argtype arg_type[5];
5106 };
5107 
5108 #define IOC_R 0x0001
5109 #define IOC_W 0x0002
5110 #define IOC_RW (IOC_R | IOC_W)
5111 
5112 #define MAX_STRUCT_SIZE 4096
5113 
5114 #ifdef CONFIG_FIEMAP
5115 /* So fiemap access checks don't overflow on 32 bit systems.
5116  * This is very slightly smaller than the limit imposed by
5117  * the underlying kernel.
5118  */
5119 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
5120                             / sizeof(struct fiemap_extent))
5121 
5122 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5123                                        int fd, int cmd, abi_long arg)
5124 {
5125     /* The parameter for this ioctl is a struct fiemap followed
5126      * by an array of struct fiemap_extent whose size is set
5127      * in fiemap->fm_extent_count. The array is filled in by the
5128      * ioctl.
5129      */
5130     int target_size_in, target_size_out;
5131     struct fiemap *fm;
5132     const argtype *arg_type = ie->arg_type;
5133     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5134     void *argptr, *p;
5135     abi_long ret;
5136     int i, extent_size = thunk_type_size(extent_arg_type, 0);
5137     uint32_t outbufsz;
5138     int free_fm = 0;
5139 
5140     assert(arg_type[0] == TYPE_PTR);
5141     assert(ie->access == IOC_RW);
5142     arg_type++;
5143     target_size_in = thunk_type_size(arg_type, 0);
5144     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5145     if (!argptr) {
5146         return -TARGET_EFAULT;
5147     }
5148     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5149     unlock_user(argptr, arg, 0);
5150     fm = (struct fiemap *)buf_temp;
5151     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5152         return -TARGET_EINVAL;
5153     }
5154 
5155     outbufsz = sizeof (*fm) +
5156         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5157 
5158     if (outbufsz > MAX_STRUCT_SIZE) {
5159         /* We can't fit all the extents into the fixed size buffer.
5160          * Allocate one that is large enough and use it instead.
5161          */
5162         fm = g_try_malloc(outbufsz);
5163         if (!fm) {
5164             return -TARGET_ENOMEM;
5165         }
5166         memcpy(fm, buf_temp, sizeof(struct fiemap));
5167         free_fm = 1;
5168     }
5169     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5170     if (!is_error(ret)) {
5171         target_size_out = target_size_in;
5172         /* An extent_count of 0 means we were only counting the extents
5173          * so there are no structs to copy
5174          */
5175         if (fm->fm_extent_count != 0) {
5176             target_size_out += fm->fm_mapped_extents * extent_size;
5177         }
5178         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5179         if (!argptr) {
5180             ret = -TARGET_EFAULT;
5181         } else {
5182             /* Convert the struct fiemap */
5183             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5184             if (fm->fm_extent_count != 0) {
5185                 p = argptr + target_size_in;
5186                 /* ...and then all the struct fiemap_extents */
5187                 for (i = 0; i < fm->fm_mapped_extents; i++) {
5188                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5189                                   THUNK_TARGET);
5190                     p += extent_size;
5191                 }
5192             }
5193             unlock_user(argptr, arg, target_size_out);
5194         }
5195     }
5196     if (free_fm) {
5197         g_free(fm);
5198     }
5199     return ret;
5200 }
5201 #endif
5202 
5203 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5204                                 int fd, int cmd, abi_long arg)
5205 {
5206     const argtype *arg_type = ie->arg_type;
5207     int target_size;
5208     void *argptr;
5209     int ret;
5210     struct ifconf *host_ifconf;
5211     uint32_t outbufsz;
5212     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5213     int target_ifreq_size;
5214     int nb_ifreq;
5215     int free_buf = 0;
5216     int i;
5217     int target_ifc_len;
5218     abi_long target_ifc_buf;
5219     int host_ifc_len;
5220     char *host_ifc_buf;
5221 
5222     assert(arg_type[0] == TYPE_PTR);
5223     assert(ie->access == IOC_RW);
5224 
5225     arg_type++;
5226     target_size = thunk_type_size(arg_type, 0);
5227 
5228     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5229     if (!argptr)
5230         return -TARGET_EFAULT;
5231     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5232     unlock_user(argptr, arg, 0);
5233 
5234     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5235     target_ifc_len = host_ifconf->ifc_len;
5236     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5237 
5238     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5239     nb_ifreq = target_ifc_len / target_ifreq_size;
5240     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5241 
5242     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5243     if (outbufsz > MAX_STRUCT_SIZE) {
5244         /* We can't fit all the extents into the fixed size buffer.
5245          * Allocate one that is large enough and use it instead.
5246          */
5247         host_ifconf = malloc(outbufsz);
5248         if (!host_ifconf) {
5249             return -TARGET_ENOMEM;
5250         }
5251         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5252         free_buf = 1;
5253     }
5254     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5255 
5256     host_ifconf->ifc_len = host_ifc_len;
5257     host_ifconf->ifc_buf = host_ifc_buf;
5258 
5259     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5260     if (!is_error(ret)) {
5261 	/* convert host ifc_len to target ifc_len */
5262 
5263         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5264         target_ifc_len = nb_ifreq * target_ifreq_size;
5265         host_ifconf->ifc_len = target_ifc_len;
5266 
5267 	/* restore target ifc_buf */
5268 
5269         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5270 
5271 	/* copy struct ifconf to target user */
5272 
5273         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5274         if (!argptr)
5275             return -TARGET_EFAULT;
5276         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5277         unlock_user(argptr, arg, target_size);
5278 
5279 	/* copy ifreq[] to target user */
5280 
5281         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5282         for (i = 0; i < nb_ifreq ; i++) {
5283             thunk_convert(argptr + i * target_ifreq_size,
5284                           host_ifc_buf + i * sizeof(struct ifreq),
5285                           ifreq_arg_type, THUNK_TARGET);
5286         }
5287         unlock_user(argptr, target_ifc_buf, target_ifc_len);
5288     }
5289 
5290     if (free_buf) {
5291         free(host_ifconf);
5292     }
5293 
5294     return ret;
5295 }
5296 
5297 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5298                             int cmd, abi_long arg)
5299 {
5300     void *argptr;
5301     struct dm_ioctl *host_dm;
5302     abi_long guest_data;
5303     uint32_t guest_data_size;
5304     int target_size;
5305     const argtype *arg_type = ie->arg_type;
5306     abi_long ret;
5307     void *big_buf = NULL;
5308     char *host_data;
5309 
5310     arg_type++;
5311     target_size = thunk_type_size(arg_type, 0);
5312     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5313     if (!argptr) {
5314         ret = -TARGET_EFAULT;
5315         goto out;
5316     }
5317     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5318     unlock_user(argptr, arg, 0);
5319 
5320     /* buf_temp is too small, so fetch things into a bigger buffer */
5321     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5322     memcpy(big_buf, buf_temp, target_size);
5323     buf_temp = big_buf;
5324     host_dm = big_buf;
5325 
5326     guest_data = arg + host_dm->data_start;
5327     if ((guest_data - arg) < 0) {
5328         ret = -TARGET_EINVAL;
5329         goto out;
5330     }
5331     guest_data_size = host_dm->data_size - host_dm->data_start;
5332     host_data = (char*)host_dm + host_dm->data_start;
5333 
5334     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5335     if (!argptr) {
5336         ret = -TARGET_EFAULT;
5337         goto out;
5338     }
5339 
5340     switch (ie->host_cmd) {
5341     case DM_REMOVE_ALL:
5342     case DM_LIST_DEVICES:
5343     case DM_DEV_CREATE:
5344     case DM_DEV_REMOVE:
5345     case DM_DEV_SUSPEND:
5346     case DM_DEV_STATUS:
5347     case DM_DEV_WAIT:
5348     case DM_TABLE_STATUS:
5349     case DM_TABLE_CLEAR:
5350     case DM_TABLE_DEPS:
5351     case DM_LIST_VERSIONS:
5352         /* no input data */
5353         break;
5354     case DM_DEV_RENAME:
5355     case DM_DEV_SET_GEOMETRY:
5356         /* data contains only strings */
5357         memcpy(host_data, argptr, guest_data_size);
5358         break;
5359     case DM_TARGET_MSG:
5360         memcpy(host_data, argptr, guest_data_size);
5361         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5362         break;
5363     case DM_TABLE_LOAD:
5364     {
5365         void *gspec = argptr;
5366         void *cur_data = host_data;
5367         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5368         int spec_size = thunk_type_size(arg_type, 0);
5369         int i;
5370 
5371         for (i = 0; i < host_dm->target_count; i++) {
5372             struct dm_target_spec *spec = cur_data;
5373             uint32_t next;
5374             int slen;
5375 
5376             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5377             slen = strlen((char*)gspec + spec_size) + 1;
5378             next = spec->next;
5379             spec->next = sizeof(*spec) + slen;
5380             strcpy((char*)&spec[1], gspec + spec_size);
5381             gspec += next;
5382             cur_data += spec->next;
5383         }
5384         break;
5385     }
5386     default:
5387         ret = -TARGET_EINVAL;
5388         unlock_user(argptr, guest_data, 0);
5389         goto out;
5390     }
5391     unlock_user(argptr, guest_data, 0);
5392 
5393     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5394     if (!is_error(ret)) {
5395         guest_data = arg + host_dm->data_start;
5396         guest_data_size = host_dm->data_size - host_dm->data_start;
5397         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5398         switch (ie->host_cmd) {
5399         case DM_REMOVE_ALL:
5400         case DM_DEV_CREATE:
5401         case DM_DEV_REMOVE:
5402         case DM_DEV_RENAME:
5403         case DM_DEV_SUSPEND:
5404         case DM_DEV_STATUS:
5405         case DM_TABLE_LOAD:
5406         case DM_TABLE_CLEAR:
5407         case DM_TARGET_MSG:
5408         case DM_DEV_SET_GEOMETRY:
5409             /* no return data */
5410             break;
5411         case DM_LIST_DEVICES:
5412         {
5413             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5414             uint32_t remaining_data = guest_data_size;
5415             void *cur_data = argptr;
5416             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5417             int nl_size = 12; /* can't use thunk_size due to alignment */
5418 
5419             while (1) {
5420                 uint32_t next = nl->next;
5421                 if (next) {
5422                     nl->next = nl_size + (strlen(nl->name) + 1);
5423                 }
5424                 if (remaining_data < nl->next) {
5425                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5426                     break;
5427                 }
5428                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5429                 strcpy(cur_data + nl_size, nl->name);
5430                 cur_data += nl->next;
5431                 remaining_data -= nl->next;
5432                 if (!next) {
5433                     break;
5434                 }
5435                 nl = (void*)nl + next;
5436             }
5437             break;
5438         }
5439         case DM_DEV_WAIT:
5440         case DM_TABLE_STATUS:
5441         {
5442             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5443             void *cur_data = argptr;
5444             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5445             int spec_size = thunk_type_size(arg_type, 0);
5446             int i;
5447 
5448             for (i = 0; i < host_dm->target_count; i++) {
5449                 uint32_t next = spec->next;
5450                 int slen = strlen((char*)&spec[1]) + 1;
5451                 spec->next = (cur_data - argptr) + spec_size + slen;
5452                 if (guest_data_size < spec->next) {
5453                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5454                     break;
5455                 }
5456                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5457                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5458                 cur_data = argptr + spec->next;
5459                 spec = (void*)host_dm + host_dm->data_start + next;
5460             }
5461             break;
5462         }
5463         case DM_TABLE_DEPS:
5464         {
5465             void *hdata = (void*)host_dm + host_dm->data_start;
5466             int count = *(uint32_t*)hdata;
5467             uint64_t *hdev = hdata + 8;
5468             uint64_t *gdev = argptr + 8;
5469             int i;
5470 
5471             *(uint32_t*)argptr = tswap32(count);
5472             for (i = 0; i < count; i++) {
5473                 *gdev = tswap64(*hdev);
5474                 gdev++;
5475                 hdev++;
5476             }
5477             break;
5478         }
5479         case DM_LIST_VERSIONS:
5480         {
5481             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5482             uint32_t remaining_data = guest_data_size;
5483             void *cur_data = argptr;
5484             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5485             int vers_size = thunk_type_size(arg_type, 0);
5486 
5487             while (1) {
5488                 uint32_t next = vers->next;
5489                 if (next) {
5490                     vers->next = vers_size + (strlen(vers->name) + 1);
5491                 }
5492                 if (remaining_data < vers->next) {
5493                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5494                     break;
5495                 }
5496                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5497                 strcpy(cur_data + vers_size, vers->name);
5498                 cur_data += vers->next;
5499                 remaining_data -= vers->next;
5500                 if (!next) {
5501                     break;
5502                 }
5503                 vers = (void*)vers + next;
5504             }
5505             break;
5506         }
5507         default:
5508             unlock_user(argptr, guest_data, 0);
5509             ret = -TARGET_EINVAL;
5510             goto out;
5511         }
5512         unlock_user(argptr, guest_data, guest_data_size);
5513 
5514         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5515         if (!argptr) {
5516             ret = -TARGET_EFAULT;
5517             goto out;
5518         }
5519         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5520         unlock_user(argptr, arg, target_size);
5521     }
5522 out:
5523     g_free(big_buf);
5524     return ret;
5525 }
5526 
5527 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5528                                int cmd, abi_long arg)
5529 {
5530     void *argptr;
5531     int target_size;
5532     const argtype *arg_type = ie->arg_type;
5533     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5534     abi_long ret;
5535 
5536     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5537     struct blkpg_partition host_part;
5538 
5539     /* Read and convert blkpg */
5540     arg_type++;
5541     target_size = thunk_type_size(arg_type, 0);
5542     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5543     if (!argptr) {
5544         ret = -TARGET_EFAULT;
5545         goto out;
5546     }
5547     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5548     unlock_user(argptr, arg, 0);
5549 
5550     switch (host_blkpg->op) {
5551     case BLKPG_ADD_PARTITION:
5552     case BLKPG_DEL_PARTITION:
5553         /* payload is struct blkpg_partition */
5554         break;
5555     default:
5556         /* Unknown opcode */
5557         ret = -TARGET_EINVAL;
5558         goto out;
5559     }
5560 
5561     /* Read and convert blkpg->data */
5562     arg = (abi_long)(uintptr_t)host_blkpg->data;
5563     target_size = thunk_type_size(part_arg_type, 0);
5564     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5565     if (!argptr) {
5566         ret = -TARGET_EFAULT;
5567         goto out;
5568     }
5569     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5570     unlock_user(argptr, arg, 0);
5571 
5572     /* Swizzle the data pointer to our local copy and call! */
5573     host_blkpg->data = &host_part;
5574     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5575 
5576 out:
5577     return ret;
5578 }
5579 
5580 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5581                                 int fd, int cmd, abi_long arg)
5582 {
5583     const argtype *arg_type = ie->arg_type;
5584     const StructEntry *se;
5585     const argtype *field_types;
5586     const int *dst_offsets, *src_offsets;
5587     int target_size;
5588     void *argptr;
5589     abi_ulong *target_rt_dev_ptr;
5590     unsigned long *host_rt_dev_ptr;
5591     abi_long ret;
5592     int i;
5593 
5594     assert(ie->access == IOC_W);
5595     assert(*arg_type == TYPE_PTR);
5596     arg_type++;
5597     assert(*arg_type == TYPE_STRUCT);
5598     target_size = thunk_type_size(arg_type, 0);
5599     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5600     if (!argptr) {
5601         return -TARGET_EFAULT;
5602     }
5603     arg_type++;
5604     assert(*arg_type == (int)STRUCT_rtentry);
5605     se = struct_entries + *arg_type++;
5606     assert(se->convert[0] == NULL);
5607     /* convert struct here to be able to catch rt_dev string */
5608     field_types = se->field_types;
5609     dst_offsets = se->field_offsets[THUNK_HOST];
5610     src_offsets = se->field_offsets[THUNK_TARGET];
5611     for (i = 0; i < se->nb_fields; i++) {
5612         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5613             assert(*field_types == TYPE_PTRVOID);
5614             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5615             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5616             if (*target_rt_dev_ptr != 0) {
5617                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5618                                                   tswapal(*target_rt_dev_ptr));
5619                 if (!*host_rt_dev_ptr) {
5620                     unlock_user(argptr, arg, 0);
5621                     return -TARGET_EFAULT;
5622                 }
5623             } else {
5624                 *host_rt_dev_ptr = 0;
5625             }
5626             field_types++;
5627             continue;
5628         }
5629         field_types = thunk_convert(buf_temp + dst_offsets[i],
5630                                     argptr + src_offsets[i],
5631                                     field_types, THUNK_HOST);
5632     }
5633     unlock_user(argptr, arg, 0);
5634 
5635     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5636     if (*host_rt_dev_ptr != 0) {
5637         unlock_user((void *)*host_rt_dev_ptr,
5638                     *target_rt_dev_ptr, 0);
5639     }
5640     return ret;
5641 }
5642 
5643 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5644                                      int fd, int cmd, abi_long arg)
5645 {
5646     int sig = target_to_host_signal(arg);
5647     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5648 }
5649 
5650 #ifdef TIOCGPTPEER
5651 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5652                                      int fd, int cmd, abi_long arg)
5653 {
5654     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5655     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5656 }
5657 #endif
5658 
5659 static IOCTLEntry ioctl_entries[] = {
5660 #define IOCTL(cmd, access, ...) \
5661     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5662 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5663     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5664 #define IOCTL_IGNORE(cmd) \
5665     { TARGET_ ## cmd, 0, #cmd },
5666 #include "ioctls.h"
5667     { 0, 0, },
5668 };
5669 
5670 /* ??? Implement proper locking for ioctls.  */
5671 /* do_ioctl() Must return target values and target errnos. */
5672 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5673 {
5674     const IOCTLEntry *ie;
5675     const argtype *arg_type;
5676     abi_long ret;
5677     uint8_t buf_temp[MAX_STRUCT_SIZE];
5678     int target_size;
5679     void *argptr;
5680 
5681     ie = ioctl_entries;
5682     for(;;) {
5683         if (ie->target_cmd == 0) {
5684             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5685             return -TARGET_ENOSYS;
5686         }
5687         if (ie->target_cmd == cmd)
5688             break;
5689         ie++;
5690     }
5691     arg_type = ie->arg_type;
5692 #if defined(DEBUG)
5693     gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5694 #endif
5695     if (ie->do_ioctl) {
5696         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5697     } else if (!ie->host_cmd) {
5698         /* Some architectures define BSD ioctls in their headers
5699            that are not implemented in Linux.  */
5700         return -TARGET_ENOSYS;
5701     }
5702 
5703     switch(arg_type[0]) {
5704     case TYPE_NULL:
5705         /* no argument */
5706         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5707         break;
5708     case TYPE_PTRVOID:
5709     case TYPE_INT:
5710         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5711         break;
5712     case TYPE_PTR:
5713         arg_type++;
5714         target_size = thunk_type_size(arg_type, 0);
5715         switch(ie->access) {
5716         case IOC_R:
5717             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5718             if (!is_error(ret)) {
5719                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5720                 if (!argptr)
5721                     return -TARGET_EFAULT;
5722                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5723                 unlock_user(argptr, arg, target_size);
5724             }
5725             break;
5726         case IOC_W:
5727             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5728             if (!argptr)
5729                 return -TARGET_EFAULT;
5730             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5731             unlock_user(argptr, arg, 0);
5732             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5733             break;
5734         default:
5735         case IOC_RW:
5736             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5737             if (!argptr)
5738                 return -TARGET_EFAULT;
5739             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5740             unlock_user(argptr, arg, 0);
5741             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5742             if (!is_error(ret)) {
5743                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5744                 if (!argptr)
5745                     return -TARGET_EFAULT;
5746                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5747                 unlock_user(argptr, arg, target_size);
5748             }
5749             break;
5750         }
5751         break;
5752     default:
5753         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5754                  (long)cmd, arg_type[0]);
5755         ret = -TARGET_ENOSYS;
5756         break;
5757     }
5758     return ret;
5759 }
5760 
5761 static const bitmask_transtbl iflag_tbl[] = {
5762         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5763         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5764         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5765         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5766         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5767         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5768         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5769         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5770         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5771         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5772         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5773         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5774         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5775         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5776         { 0, 0, 0, 0 }
5777 };
5778 
5779 static const bitmask_transtbl oflag_tbl[] = {
5780 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5781 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5782 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5783 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5784 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5785 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5786 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5787 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5788 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5789 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5790 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5791 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5792 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5793 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5794 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5795 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5796 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5797 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5798 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5799 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5800 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5801 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5802 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5803 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5804 	{ 0, 0, 0, 0 }
5805 };
5806 
5807 static const bitmask_transtbl cflag_tbl[] = {
5808 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5809 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5810 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5811 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5812 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5813 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5814 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5815 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5816 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5817 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5818 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5819 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5820 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5821 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5822 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5823 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5824 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5825 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5826 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5827 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5828 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5829 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5830 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5831 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5832 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5833 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5834 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5835 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5836 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5837 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5838 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5839 	{ 0, 0, 0, 0 }
5840 };
5841 
5842 static const bitmask_transtbl lflag_tbl[] = {
5843 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5844 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5845 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5846 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5847 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5848 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5849 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5850 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5851 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5852 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5853 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5854 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5855 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5856 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5857 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5858 	{ 0, 0, 0, 0 }
5859 };
5860 
5861 static void target_to_host_termios (void *dst, const void *src)
5862 {
5863     struct host_termios *host = dst;
5864     const struct target_termios *target = src;
5865 
5866     host->c_iflag =
5867         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5868     host->c_oflag =
5869         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5870     host->c_cflag =
5871         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5872     host->c_lflag =
5873         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5874     host->c_line = target->c_line;
5875 
5876     memset(host->c_cc, 0, sizeof(host->c_cc));
5877     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5878     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5879     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5880     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5881     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5882     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5883     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5884     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5885     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5886     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5887     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5888     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5889     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5890     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5891     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5892     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5893     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5894 }
5895 
5896 static void host_to_target_termios (void *dst, const void *src)
5897 {
5898     struct target_termios *target = dst;
5899     const struct host_termios *host = src;
5900 
5901     target->c_iflag =
5902         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5903     target->c_oflag =
5904         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5905     target->c_cflag =
5906         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5907     target->c_lflag =
5908         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5909     target->c_line = host->c_line;
5910 
5911     memset(target->c_cc, 0, sizeof(target->c_cc));
5912     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5913     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5914     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5915     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5916     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5917     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5918     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5919     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5920     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5921     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5922     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5923     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5924     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5925     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5926     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5927     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5928     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5929 }
5930 
5931 static const StructEntry struct_termios_def = {
5932     .convert = { host_to_target_termios, target_to_host_termios },
5933     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5934     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5935 };
5936 
5937 static bitmask_transtbl mmap_flags_tbl[] = {
5938     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5939     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5940     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5941     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5942       MAP_ANONYMOUS, MAP_ANONYMOUS },
5943     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5944       MAP_GROWSDOWN, MAP_GROWSDOWN },
5945     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5946       MAP_DENYWRITE, MAP_DENYWRITE },
5947     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5948       MAP_EXECUTABLE, MAP_EXECUTABLE },
5949     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5950     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5951       MAP_NORESERVE, MAP_NORESERVE },
5952     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5953     /* MAP_STACK had been ignored by the kernel for quite some time.
5954        Recognize it for the target insofar as we do not want to pass
5955        it through to the host.  */
5956     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5957     { 0, 0, 0, 0 }
5958 };
5959 
5960 #if defined(TARGET_I386)
5961 
5962 /* NOTE: there is really one LDT for all the threads */
5963 static uint8_t *ldt_table;
5964 
5965 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5966 {
5967     int size;
5968     void *p;
5969 
5970     if (!ldt_table)
5971         return 0;
5972     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5973     if (size > bytecount)
5974         size = bytecount;
5975     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5976     if (!p)
5977         return -TARGET_EFAULT;
5978     /* ??? Should this by byteswapped?  */
5979     memcpy(p, ldt_table, size);
5980     unlock_user(p, ptr, size);
5981     return size;
5982 }
5983 
5984 /* XXX: add locking support */
5985 static abi_long write_ldt(CPUX86State *env,
5986                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5987 {
5988     struct target_modify_ldt_ldt_s ldt_info;
5989     struct target_modify_ldt_ldt_s *target_ldt_info;
5990     int seg_32bit, contents, read_exec_only, limit_in_pages;
5991     int seg_not_present, useable, lm;
5992     uint32_t *lp, entry_1, entry_2;
5993 
5994     if (bytecount != sizeof(ldt_info))
5995         return -TARGET_EINVAL;
5996     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5997         return -TARGET_EFAULT;
5998     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5999     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6000     ldt_info.limit = tswap32(target_ldt_info->limit);
6001     ldt_info.flags = tswap32(target_ldt_info->flags);
6002     unlock_user_struct(target_ldt_info, ptr, 0);
6003 
6004     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6005         return -TARGET_EINVAL;
6006     seg_32bit = ldt_info.flags & 1;
6007     contents = (ldt_info.flags >> 1) & 3;
6008     read_exec_only = (ldt_info.flags >> 3) & 1;
6009     limit_in_pages = (ldt_info.flags >> 4) & 1;
6010     seg_not_present = (ldt_info.flags >> 5) & 1;
6011     useable = (ldt_info.flags >> 6) & 1;
6012 #ifdef TARGET_ABI32
6013     lm = 0;
6014 #else
6015     lm = (ldt_info.flags >> 7) & 1;
6016 #endif
6017     if (contents == 3) {
6018         if (oldmode)
6019             return -TARGET_EINVAL;
6020         if (seg_not_present == 0)
6021             return -TARGET_EINVAL;
6022     }
6023     /* allocate the LDT */
6024     if (!ldt_table) {
6025         env->ldt.base = target_mmap(0,
6026                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6027                                     PROT_READ|PROT_WRITE,
6028                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6029         if (env->ldt.base == -1)
6030             return -TARGET_ENOMEM;
6031         memset(g2h(env->ldt.base), 0,
6032                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6033         env->ldt.limit = 0xffff;
6034         ldt_table = g2h(env->ldt.base);
6035     }
6036 
6037     /* NOTE: same code as Linux kernel */
6038     /* Allow LDTs to be cleared by the user. */
6039     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6040         if (oldmode ||
6041             (contents == 0		&&
6042              read_exec_only == 1	&&
6043              seg_32bit == 0		&&
6044              limit_in_pages == 0	&&
6045              seg_not_present == 1	&&
6046              useable == 0 )) {
6047             entry_1 = 0;
6048             entry_2 = 0;
6049             goto install;
6050         }
6051     }
6052 
6053     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6054         (ldt_info.limit & 0x0ffff);
6055     entry_2 = (ldt_info.base_addr & 0xff000000) |
6056         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6057         (ldt_info.limit & 0xf0000) |
6058         ((read_exec_only ^ 1) << 9) |
6059         (contents << 10) |
6060         ((seg_not_present ^ 1) << 15) |
6061         (seg_32bit << 22) |
6062         (limit_in_pages << 23) |
6063         (lm << 21) |
6064         0x7000;
6065     if (!oldmode)
6066         entry_2 |= (useable << 20);
6067 
6068     /* Install the new entry ...  */
6069 install:
6070     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6071     lp[0] = tswap32(entry_1);
6072     lp[1] = tswap32(entry_2);
6073     return 0;
6074 }
6075 
6076 /* specific and weird i386 syscalls */
6077 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6078                               unsigned long bytecount)
6079 {
6080     abi_long ret;
6081 
6082     switch (func) {
6083     case 0:
6084         ret = read_ldt(ptr, bytecount);
6085         break;
6086     case 1:
6087         ret = write_ldt(env, ptr, bytecount, 1);
6088         break;
6089     case 0x11:
6090         ret = write_ldt(env, ptr, bytecount, 0);
6091         break;
6092     default:
6093         ret = -TARGET_ENOSYS;
6094         break;
6095     }
6096     return ret;
6097 }
6098 
6099 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6100 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6101 {
6102     uint64_t *gdt_table = g2h(env->gdt.base);
6103     struct target_modify_ldt_ldt_s ldt_info;
6104     struct target_modify_ldt_ldt_s *target_ldt_info;
6105     int seg_32bit, contents, read_exec_only, limit_in_pages;
6106     int seg_not_present, useable, lm;
6107     uint32_t *lp, entry_1, entry_2;
6108     int i;
6109 
6110     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6111     if (!target_ldt_info)
6112         return -TARGET_EFAULT;
6113     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6114     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6115     ldt_info.limit = tswap32(target_ldt_info->limit);
6116     ldt_info.flags = tswap32(target_ldt_info->flags);
6117     if (ldt_info.entry_number == -1) {
6118         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6119             if (gdt_table[i] == 0) {
6120                 ldt_info.entry_number = i;
6121                 target_ldt_info->entry_number = tswap32(i);
6122                 break;
6123             }
6124         }
6125     }
6126     unlock_user_struct(target_ldt_info, ptr, 1);
6127 
6128     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6129         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6130            return -TARGET_EINVAL;
6131     seg_32bit = ldt_info.flags & 1;
6132     contents = (ldt_info.flags >> 1) & 3;
6133     read_exec_only = (ldt_info.flags >> 3) & 1;
6134     limit_in_pages = (ldt_info.flags >> 4) & 1;
6135     seg_not_present = (ldt_info.flags >> 5) & 1;
6136     useable = (ldt_info.flags >> 6) & 1;
6137 #ifdef TARGET_ABI32
6138     lm = 0;
6139 #else
6140     lm = (ldt_info.flags >> 7) & 1;
6141 #endif
6142 
6143     if (contents == 3) {
6144         if (seg_not_present == 0)
6145             return -TARGET_EINVAL;
6146     }
6147 
6148     /* NOTE: same code as Linux kernel */
6149     /* Allow LDTs to be cleared by the user. */
6150     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6151         if ((contents == 0             &&
6152              read_exec_only == 1       &&
6153              seg_32bit == 0            &&
6154              limit_in_pages == 0       &&
6155              seg_not_present == 1      &&
6156              useable == 0 )) {
6157             entry_1 = 0;
6158             entry_2 = 0;
6159             goto install;
6160         }
6161     }
6162 
6163     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6164         (ldt_info.limit & 0x0ffff);
6165     entry_2 = (ldt_info.base_addr & 0xff000000) |
6166         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6167         (ldt_info.limit & 0xf0000) |
6168         ((read_exec_only ^ 1) << 9) |
6169         (contents << 10) |
6170         ((seg_not_present ^ 1) << 15) |
6171         (seg_32bit << 22) |
6172         (limit_in_pages << 23) |
6173         (useable << 20) |
6174         (lm << 21) |
6175         0x7000;
6176 
6177     /* Install the new entry ...  */
6178 install:
6179     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6180     lp[0] = tswap32(entry_1);
6181     lp[1] = tswap32(entry_2);
6182     return 0;
6183 }
6184 
6185 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6186 {
6187     struct target_modify_ldt_ldt_s *target_ldt_info;
6188     uint64_t *gdt_table = g2h(env->gdt.base);
6189     uint32_t base_addr, limit, flags;
6190     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6191     int seg_not_present, useable, lm;
6192     uint32_t *lp, entry_1, entry_2;
6193 
6194     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6195     if (!target_ldt_info)
6196         return -TARGET_EFAULT;
6197     idx = tswap32(target_ldt_info->entry_number);
6198     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6199         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6200         unlock_user_struct(target_ldt_info, ptr, 1);
6201         return -TARGET_EINVAL;
6202     }
6203     lp = (uint32_t *)(gdt_table + idx);
6204     entry_1 = tswap32(lp[0]);
6205     entry_2 = tswap32(lp[1]);
6206 
6207     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6208     contents = (entry_2 >> 10) & 3;
6209     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6210     seg_32bit = (entry_2 >> 22) & 1;
6211     limit_in_pages = (entry_2 >> 23) & 1;
6212     useable = (entry_2 >> 20) & 1;
6213 #ifdef TARGET_ABI32
6214     lm = 0;
6215 #else
6216     lm = (entry_2 >> 21) & 1;
6217 #endif
6218     flags = (seg_32bit << 0) | (contents << 1) |
6219         (read_exec_only << 3) | (limit_in_pages << 4) |
6220         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6221     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6222     base_addr = (entry_1 >> 16) |
6223         (entry_2 & 0xff000000) |
6224         ((entry_2 & 0xff) << 16);
6225     target_ldt_info->base_addr = tswapal(base_addr);
6226     target_ldt_info->limit = tswap32(limit);
6227     target_ldt_info->flags = tswap32(flags);
6228     unlock_user_struct(target_ldt_info, ptr, 1);
6229     return 0;
6230 }
6231 #endif /* TARGET_I386 && TARGET_ABI32 */
6232 
6233 #ifndef TARGET_ABI32
6234 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6235 {
6236     abi_long ret = 0;
6237     abi_ulong val;
6238     int idx;
6239 
6240     switch(code) {
6241     case TARGET_ARCH_SET_GS:
6242     case TARGET_ARCH_SET_FS:
6243         if (code == TARGET_ARCH_SET_GS)
6244             idx = R_GS;
6245         else
6246             idx = R_FS;
6247         cpu_x86_load_seg(env, idx, 0);
6248         env->segs[idx].base = addr;
6249         break;
6250     case TARGET_ARCH_GET_GS:
6251     case TARGET_ARCH_GET_FS:
6252         if (code == TARGET_ARCH_GET_GS)
6253             idx = R_GS;
6254         else
6255             idx = R_FS;
6256         val = env->segs[idx].base;
6257         if (put_user(val, addr, abi_ulong))
6258             ret = -TARGET_EFAULT;
6259         break;
6260     default:
6261         ret = -TARGET_EINVAL;
6262         break;
6263     }
6264     return ret;
6265 }
6266 #endif
6267 
6268 #endif /* defined(TARGET_I386) */
6269 
6270 #define NEW_STACK_SIZE 0x40000
6271 
6272 
6273 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6274 typedef struct {
6275     CPUArchState *env;
6276     pthread_mutex_t mutex;
6277     pthread_cond_t cond;
6278     pthread_t thread;
6279     uint32_t tid;
6280     abi_ulong child_tidptr;
6281     abi_ulong parent_tidptr;
6282     sigset_t sigmask;
6283 } new_thread_info;
6284 
6285 static void *clone_func(void *arg)
6286 {
6287     new_thread_info *info = arg;
6288     CPUArchState *env;
6289     CPUState *cpu;
6290     TaskState *ts;
6291 
6292     rcu_register_thread();
6293     tcg_register_thread();
6294     env = info->env;
6295     cpu = ENV_GET_CPU(env);
6296     thread_cpu = cpu;
6297     ts = (TaskState *)cpu->opaque;
6298     info->tid = gettid();
6299     task_settid(ts);
6300     if (info->child_tidptr)
6301         put_user_u32(info->tid, info->child_tidptr);
6302     if (info->parent_tidptr)
6303         put_user_u32(info->tid, info->parent_tidptr);
6304     /* Enable signals.  */
6305     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6306     /* Signal to the parent that we're ready.  */
6307     pthread_mutex_lock(&info->mutex);
6308     pthread_cond_broadcast(&info->cond);
6309     pthread_mutex_unlock(&info->mutex);
6310     /* Wait until the parent has finished initializing the tls state.  */
6311     pthread_mutex_lock(&clone_lock);
6312     pthread_mutex_unlock(&clone_lock);
6313     cpu_loop(env);
6314     /* never exits */
6315     return NULL;
6316 }
6317 
6318 /* do_fork() Must return host values and target errnos (unlike most
6319    do_*() functions). */
6320 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6321                    abi_ulong parent_tidptr, target_ulong newtls,
6322                    abi_ulong child_tidptr)
6323 {
6324     CPUState *cpu = ENV_GET_CPU(env);
6325     int ret;
6326     TaskState *ts;
6327     CPUState *new_cpu;
6328     CPUArchState *new_env;
6329     sigset_t sigmask;
6330 
6331     flags &= ~CLONE_IGNORED_FLAGS;
6332 
6333     /* Emulate vfork() with fork() */
6334     if (flags & CLONE_VFORK)
6335         flags &= ~(CLONE_VFORK | CLONE_VM);
6336 
6337     if (flags & CLONE_VM) {
6338         TaskState *parent_ts = (TaskState *)cpu->opaque;
6339         new_thread_info info;
6340         pthread_attr_t attr;
6341 
6342         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6343             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6344             return -TARGET_EINVAL;
6345         }
6346 
6347         ts = g_new0(TaskState, 1);
6348         init_task_state(ts);
6349         /* we create a new CPU instance. */
6350         new_env = cpu_copy(env);
6351         /* Init regs that differ from the parent.  */
6352         cpu_clone_regs(new_env, newsp);
6353         new_cpu = ENV_GET_CPU(new_env);
6354         new_cpu->opaque = ts;
6355         ts->bprm = parent_ts->bprm;
6356         ts->info = parent_ts->info;
6357         ts->signal_mask = parent_ts->signal_mask;
6358 
6359         if (flags & CLONE_CHILD_CLEARTID) {
6360             ts->child_tidptr = child_tidptr;
6361         }
6362 
6363         if (flags & CLONE_SETTLS) {
6364             cpu_set_tls (new_env, newtls);
6365         }
6366 
6367         /* Grab a mutex so that thread setup appears atomic.  */
6368         pthread_mutex_lock(&clone_lock);
6369 
6370         memset(&info, 0, sizeof(info));
6371         pthread_mutex_init(&info.mutex, NULL);
6372         pthread_mutex_lock(&info.mutex);
6373         pthread_cond_init(&info.cond, NULL);
6374         info.env = new_env;
6375         if (flags & CLONE_CHILD_SETTID) {
6376             info.child_tidptr = child_tidptr;
6377         }
6378         if (flags & CLONE_PARENT_SETTID) {
6379             info.parent_tidptr = parent_tidptr;
6380         }
6381 
6382         ret = pthread_attr_init(&attr);
6383         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6384         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6385         /* It is not safe to deliver signals until the child has finished
6386            initializing, so temporarily block all signals.  */
6387         sigfillset(&sigmask);
6388         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6389 
6390         /* If this is our first additional thread, we need to ensure we
6391          * generate code for parallel execution and flush old translations.
6392          */
6393         if (!parallel_cpus) {
6394             parallel_cpus = true;
6395             tb_flush(cpu);
6396         }
6397 
6398         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6399         /* TODO: Free new CPU state if thread creation failed.  */
6400 
6401         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6402         pthread_attr_destroy(&attr);
6403         if (ret == 0) {
6404             /* Wait for the child to initialize.  */
6405             pthread_cond_wait(&info.cond, &info.mutex);
6406             ret = info.tid;
6407         } else {
6408             ret = -1;
6409         }
6410         pthread_mutex_unlock(&info.mutex);
6411         pthread_cond_destroy(&info.cond);
6412         pthread_mutex_destroy(&info.mutex);
6413         pthread_mutex_unlock(&clone_lock);
6414     } else {
6415         /* if no CLONE_VM, we consider it is a fork */
6416         if (flags & CLONE_INVALID_FORK_FLAGS) {
6417             return -TARGET_EINVAL;
6418         }
6419 
6420         /* We can't support custom termination signals */
6421         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6422             return -TARGET_EINVAL;
6423         }
6424 
6425         if (block_signals()) {
6426             return -TARGET_ERESTARTSYS;
6427         }
6428 
6429         fork_start();
6430         ret = fork();
6431         if (ret == 0) {
6432             /* Child Process.  */
6433             cpu_clone_regs(env, newsp);
6434             fork_end(1);
6435             /* There is a race condition here.  The parent process could
6436                theoretically read the TID in the child process before the child
6437                tid is set.  This would require using either ptrace
6438                (not implemented) or having *_tidptr to point at a shared memory
6439                mapping.  We can't repeat the spinlock hack used above because
6440                the child process gets its own copy of the lock.  */
6441             if (flags & CLONE_CHILD_SETTID)
6442                 put_user_u32(gettid(), child_tidptr);
6443             if (flags & CLONE_PARENT_SETTID)
6444                 put_user_u32(gettid(), parent_tidptr);
6445             ts = (TaskState *)cpu->opaque;
6446             if (flags & CLONE_SETTLS)
6447                 cpu_set_tls (env, newtls);
6448             if (flags & CLONE_CHILD_CLEARTID)
6449                 ts->child_tidptr = child_tidptr;
6450         } else {
6451             fork_end(0);
6452         }
6453     }
6454     return ret;
6455 }
6456 
6457 /* warning : doesn't handle linux specific flags... */
6458 static int target_to_host_fcntl_cmd(int cmd)
6459 {
6460     switch(cmd) {
6461 	case TARGET_F_DUPFD:
6462 	case TARGET_F_GETFD:
6463 	case TARGET_F_SETFD:
6464 	case TARGET_F_GETFL:
6465 	case TARGET_F_SETFL:
6466             return cmd;
6467         case TARGET_F_GETLK:
6468             return F_GETLK64;
6469         case TARGET_F_SETLK:
6470             return F_SETLK64;
6471         case TARGET_F_SETLKW:
6472             return F_SETLKW64;
6473 	case TARGET_F_GETOWN:
6474 	    return F_GETOWN;
6475 	case TARGET_F_SETOWN:
6476 	    return F_SETOWN;
6477 	case TARGET_F_GETSIG:
6478 	    return F_GETSIG;
6479 	case TARGET_F_SETSIG:
6480 	    return F_SETSIG;
6481 #if TARGET_ABI_BITS == 32
6482         case TARGET_F_GETLK64:
6483 	    return F_GETLK64;
6484 	case TARGET_F_SETLK64:
6485 	    return F_SETLK64;
6486 	case TARGET_F_SETLKW64:
6487 	    return F_SETLKW64;
6488 #endif
6489         case TARGET_F_SETLEASE:
6490             return F_SETLEASE;
6491         case TARGET_F_GETLEASE:
6492             return F_GETLEASE;
6493 #ifdef F_DUPFD_CLOEXEC
6494         case TARGET_F_DUPFD_CLOEXEC:
6495             return F_DUPFD_CLOEXEC;
6496 #endif
6497         case TARGET_F_NOTIFY:
6498             return F_NOTIFY;
6499 #ifdef F_GETOWN_EX
6500 	case TARGET_F_GETOWN_EX:
6501 	    return F_GETOWN_EX;
6502 #endif
6503 #ifdef F_SETOWN_EX
6504 	case TARGET_F_SETOWN_EX:
6505 	    return F_SETOWN_EX;
6506 #endif
6507 #ifdef F_SETPIPE_SZ
6508         case TARGET_F_SETPIPE_SZ:
6509             return F_SETPIPE_SZ;
6510         case TARGET_F_GETPIPE_SZ:
6511             return F_GETPIPE_SZ;
6512 #endif
6513 	default:
6514             return -TARGET_EINVAL;
6515     }
6516     return -TARGET_EINVAL;
6517 }
6518 
6519 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6520 static const bitmask_transtbl flock_tbl[] = {
6521     TRANSTBL_CONVERT(F_RDLCK),
6522     TRANSTBL_CONVERT(F_WRLCK),
6523     TRANSTBL_CONVERT(F_UNLCK),
6524     TRANSTBL_CONVERT(F_EXLCK),
6525     TRANSTBL_CONVERT(F_SHLCK),
6526     { 0, 0, 0, 0 }
6527 };
6528 
6529 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6530                                             abi_ulong target_flock_addr)
6531 {
6532     struct target_flock *target_fl;
6533     short l_type;
6534 
6535     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6536         return -TARGET_EFAULT;
6537     }
6538 
6539     __get_user(l_type, &target_fl->l_type);
6540     fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6541     __get_user(fl->l_whence, &target_fl->l_whence);
6542     __get_user(fl->l_start, &target_fl->l_start);
6543     __get_user(fl->l_len, &target_fl->l_len);
6544     __get_user(fl->l_pid, &target_fl->l_pid);
6545     unlock_user_struct(target_fl, target_flock_addr, 0);
6546     return 0;
6547 }
6548 
6549 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6550                                           const struct flock64 *fl)
6551 {
6552     struct target_flock *target_fl;
6553     short l_type;
6554 
6555     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6556         return -TARGET_EFAULT;
6557     }
6558 
6559     l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6560     __put_user(l_type, &target_fl->l_type);
6561     __put_user(fl->l_whence, &target_fl->l_whence);
6562     __put_user(fl->l_start, &target_fl->l_start);
6563     __put_user(fl->l_len, &target_fl->l_len);
6564     __put_user(fl->l_pid, &target_fl->l_pid);
6565     unlock_user_struct(target_fl, target_flock_addr, 1);
6566     return 0;
6567 }
6568 
6569 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6570 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6571 
6572 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6573 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6574                                                    abi_ulong target_flock_addr)
6575 {
6576     struct target_eabi_flock64 *target_fl;
6577     short l_type;
6578 
6579     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6580         return -TARGET_EFAULT;
6581     }
6582 
6583     __get_user(l_type, &target_fl->l_type);
6584     fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6585     __get_user(fl->l_whence, &target_fl->l_whence);
6586     __get_user(fl->l_start, &target_fl->l_start);
6587     __get_user(fl->l_len, &target_fl->l_len);
6588     __get_user(fl->l_pid, &target_fl->l_pid);
6589     unlock_user_struct(target_fl, target_flock_addr, 0);
6590     return 0;
6591 }
6592 
6593 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6594                                                  const struct flock64 *fl)
6595 {
6596     struct target_eabi_flock64 *target_fl;
6597     short l_type;
6598 
6599     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6600         return -TARGET_EFAULT;
6601     }
6602 
6603     l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6604     __put_user(l_type, &target_fl->l_type);
6605     __put_user(fl->l_whence, &target_fl->l_whence);
6606     __put_user(fl->l_start, &target_fl->l_start);
6607     __put_user(fl->l_len, &target_fl->l_len);
6608     __put_user(fl->l_pid, &target_fl->l_pid);
6609     unlock_user_struct(target_fl, target_flock_addr, 1);
6610     return 0;
6611 }
6612 #endif
6613 
6614 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6615                                               abi_ulong target_flock_addr)
6616 {
6617     struct target_flock64 *target_fl;
6618     short l_type;
6619 
6620     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6621         return -TARGET_EFAULT;
6622     }
6623 
6624     __get_user(l_type, &target_fl->l_type);
6625     fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6626     __get_user(fl->l_whence, &target_fl->l_whence);
6627     __get_user(fl->l_start, &target_fl->l_start);
6628     __get_user(fl->l_len, &target_fl->l_len);
6629     __get_user(fl->l_pid, &target_fl->l_pid);
6630     unlock_user_struct(target_fl, target_flock_addr, 0);
6631     return 0;
6632 }
6633 
6634 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6635                                             const struct flock64 *fl)
6636 {
6637     struct target_flock64 *target_fl;
6638     short l_type;
6639 
6640     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6641         return -TARGET_EFAULT;
6642     }
6643 
6644     l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6645     __put_user(l_type, &target_fl->l_type);
6646     __put_user(fl->l_whence, &target_fl->l_whence);
6647     __put_user(fl->l_start, &target_fl->l_start);
6648     __put_user(fl->l_len, &target_fl->l_len);
6649     __put_user(fl->l_pid, &target_fl->l_pid);
6650     unlock_user_struct(target_fl, target_flock_addr, 1);
6651     return 0;
6652 }
6653 
6654 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6655 {
6656     struct flock64 fl64;
6657 #ifdef F_GETOWN_EX
6658     struct f_owner_ex fox;
6659     struct target_f_owner_ex *target_fox;
6660 #endif
6661     abi_long ret;
6662     int host_cmd = target_to_host_fcntl_cmd(cmd);
6663 
6664     if (host_cmd == -TARGET_EINVAL)
6665 	    return host_cmd;
6666 
6667     switch(cmd) {
6668     case TARGET_F_GETLK:
6669         ret = copy_from_user_flock(&fl64, arg);
6670         if (ret) {
6671             return ret;
6672         }
6673         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6674         if (ret == 0) {
6675             ret = copy_to_user_flock(arg, &fl64);
6676         }
6677         break;
6678 
6679     case TARGET_F_SETLK:
6680     case TARGET_F_SETLKW:
6681         ret = copy_from_user_flock(&fl64, arg);
6682         if (ret) {
6683             return ret;
6684         }
6685         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6686         break;
6687 
6688     case TARGET_F_GETLK64:
6689         ret = copy_from_user_flock64(&fl64, arg);
6690         if (ret) {
6691             return ret;
6692         }
6693         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6694         if (ret == 0) {
6695             ret = copy_to_user_flock64(arg, &fl64);
6696         }
6697         break;
6698     case TARGET_F_SETLK64:
6699     case TARGET_F_SETLKW64:
6700         ret = copy_from_user_flock64(&fl64, arg);
6701         if (ret) {
6702             return ret;
6703         }
6704         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6705         break;
6706 
6707     case TARGET_F_GETFL:
6708         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6709         if (ret >= 0) {
6710             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6711         }
6712         break;
6713 
6714     case TARGET_F_SETFL:
6715         ret = get_errno(safe_fcntl(fd, host_cmd,
6716                                    target_to_host_bitmask(arg,
6717                                                           fcntl_flags_tbl)));
6718         break;
6719 
6720 #ifdef F_GETOWN_EX
6721     case TARGET_F_GETOWN_EX:
6722         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6723         if (ret >= 0) {
6724             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6725                 return -TARGET_EFAULT;
6726             target_fox->type = tswap32(fox.type);
6727             target_fox->pid = tswap32(fox.pid);
6728             unlock_user_struct(target_fox, arg, 1);
6729         }
6730         break;
6731 #endif
6732 
6733 #ifdef F_SETOWN_EX
6734     case TARGET_F_SETOWN_EX:
6735         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6736             return -TARGET_EFAULT;
6737         fox.type = tswap32(target_fox->type);
6738         fox.pid = tswap32(target_fox->pid);
6739         unlock_user_struct(target_fox, arg, 0);
6740         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6741         break;
6742 #endif
6743 
6744     case TARGET_F_SETOWN:
6745     case TARGET_F_GETOWN:
6746     case TARGET_F_SETSIG:
6747     case TARGET_F_GETSIG:
6748     case TARGET_F_SETLEASE:
6749     case TARGET_F_GETLEASE:
6750     case TARGET_F_SETPIPE_SZ:
6751     case TARGET_F_GETPIPE_SZ:
6752         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6753         break;
6754 
6755     default:
6756         ret = get_errno(safe_fcntl(fd, cmd, arg));
6757         break;
6758     }
6759     return ret;
6760 }
6761 
6762 #ifdef USE_UID16
6763 
6764 static inline int high2lowuid(int uid)
6765 {
6766     if (uid > 65535)
6767         return 65534;
6768     else
6769         return uid;
6770 }
6771 
6772 static inline int high2lowgid(int gid)
6773 {
6774     if (gid > 65535)
6775         return 65534;
6776     else
6777         return gid;
6778 }
6779 
6780 static inline int low2highuid(int uid)
6781 {
6782     if ((int16_t)uid == -1)
6783         return -1;
6784     else
6785         return uid;
6786 }
6787 
6788 static inline int low2highgid(int gid)
6789 {
6790     if ((int16_t)gid == -1)
6791         return -1;
6792     else
6793         return gid;
6794 }
6795 static inline int tswapid(int id)
6796 {
6797     return tswap16(id);
6798 }
6799 
6800 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6801 
6802 #else /* !USE_UID16 */
6803 static inline int high2lowuid(int uid)
6804 {
6805     return uid;
6806 }
6807 static inline int high2lowgid(int gid)
6808 {
6809     return gid;
6810 }
6811 static inline int low2highuid(int uid)
6812 {
6813     return uid;
6814 }
6815 static inline int low2highgid(int gid)
6816 {
6817     return gid;
6818 }
6819 static inline int tswapid(int id)
6820 {
6821     return tswap32(id);
6822 }
6823 
6824 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6825 
6826 #endif /* USE_UID16 */
6827 
6828 /* We must do direct syscalls for setting UID/GID, because we want to
6829  * implement the Linux system call semantics of "change only for this thread",
6830  * not the libc/POSIX semantics of "change for all threads in process".
6831  * (See http://ewontfix.com/17/ for more details.)
6832  * We use the 32-bit version of the syscalls if present; if it is not
6833  * then either the host architecture supports 32-bit UIDs natively with
6834  * the standard syscall, or the 16-bit UID is the best we can do.
6835  */
6836 #ifdef __NR_setuid32
6837 #define __NR_sys_setuid __NR_setuid32
6838 #else
6839 #define __NR_sys_setuid __NR_setuid
6840 #endif
6841 #ifdef __NR_setgid32
6842 #define __NR_sys_setgid __NR_setgid32
6843 #else
6844 #define __NR_sys_setgid __NR_setgid
6845 #endif
6846 #ifdef __NR_setresuid32
6847 #define __NR_sys_setresuid __NR_setresuid32
6848 #else
6849 #define __NR_sys_setresuid __NR_setresuid
6850 #endif
6851 #ifdef __NR_setresgid32
6852 #define __NR_sys_setresgid __NR_setresgid32
6853 #else
6854 #define __NR_sys_setresgid __NR_setresgid
6855 #endif
6856 
6857 _syscall1(int, sys_setuid, uid_t, uid)
6858 _syscall1(int, sys_setgid, gid_t, gid)
6859 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6860 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6861 
6862 void syscall_init(void)
6863 {
6864     IOCTLEntry *ie;
6865     const argtype *arg_type;
6866     int size;
6867     int i;
6868 
6869     thunk_init(STRUCT_MAX);
6870 
6871 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6872 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6873 #include "syscall_types.h"
6874 #undef STRUCT
6875 #undef STRUCT_SPECIAL
6876 
6877     /* Build target_to_host_errno_table[] table from
6878      * host_to_target_errno_table[]. */
6879     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6880         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6881     }
6882 
6883     /* we patch the ioctl size if necessary. We rely on the fact that
6884        no ioctl has all the bits at '1' in the size field */
6885     ie = ioctl_entries;
6886     while (ie->target_cmd != 0) {
6887         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6888             TARGET_IOC_SIZEMASK) {
6889             arg_type = ie->arg_type;
6890             if (arg_type[0] != TYPE_PTR) {
6891                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6892                         ie->target_cmd);
6893                 exit(1);
6894             }
6895             arg_type++;
6896             size = thunk_type_size(arg_type, 0);
6897             ie->target_cmd = (ie->target_cmd &
6898                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6899                 (size << TARGET_IOC_SIZESHIFT);
6900         }
6901 
6902         /* automatic consistency check if same arch */
6903 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6904     (defined(__x86_64__) && defined(TARGET_X86_64))
6905         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6906             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6907                     ie->name, ie->target_cmd, ie->host_cmd);
6908         }
6909 #endif
6910         ie++;
6911     }
6912 }
6913 
6914 #if TARGET_ABI_BITS == 32
6915 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6916 {
6917 #ifdef TARGET_WORDS_BIGENDIAN
6918     return ((uint64_t)word0 << 32) | word1;
6919 #else
6920     return ((uint64_t)word1 << 32) | word0;
6921 #endif
6922 }
6923 #else /* TARGET_ABI_BITS == 32 */
6924 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6925 {
6926     return word0;
6927 }
6928 #endif /* TARGET_ABI_BITS != 32 */
6929 
6930 #ifdef TARGET_NR_truncate64
6931 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6932                                          abi_long arg2,
6933                                          abi_long arg3,
6934                                          abi_long arg4)
6935 {
6936     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6937         arg2 = arg3;
6938         arg3 = arg4;
6939     }
6940     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6941 }
6942 #endif
6943 
6944 #ifdef TARGET_NR_ftruncate64
6945 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6946                                           abi_long arg2,
6947                                           abi_long arg3,
6948                                           abi_long arg4)
6949 {
6950     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6951         arg2 = arg3;
6952         arg3 = arg4;
6953     }
6954     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6955 }
6956 #endif
6957 
6958 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6959                                                abi_ulong target_addr)
6960 {
6961     struct target_timespec *target_ts;
6962 
6963     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6964         return -TARGET_EFAULT;
6965     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6966     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6967     unlock_user_struct(target_ts, target_addr, 0);
6968     return 0;
6969 }
6970 
6971 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6972                                                struct timespec *host_ts)
6973 {
6974     struct target_timespec *target_ts;
6975 
6976     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6977         return -TARGET_EFAULT;
6978     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6979     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6980     unlock_user_struct(target_ts, target_addr, 1);
6981     return 0;
6982 }
6983 
6984 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6985                                                  abi_ulong target_addr)
6986 {
6987     struct target_itimerspec *target_itspec;
6988 
6989     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6990         return -TARGET_EFAULT;
6991     }
6992 
6993     host_itspec->it_interval.tv_sec =
6994                             tswapal(target_itspec->it_interval.tv_sec);
6995     host_itspec->it_interval.tv_nsec =
6996                             tswapal(target_itspec->it_interval.tv_nsec);
6997     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6998     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6999 
7000     unlock_user_struct(target_itspec, target_addr, 1);
7001     return 0;
7002 }
7003 
7004 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7005                                                struct itimerspec *host_its)
7006 {
7007     struct target_itimerspec *target_itspec;
7008 
7009     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7010         return -TARGET_EFAULT;
7011     }
7012 
7013     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7014     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7015 
7016     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7017     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7018 
7019     unlock_user_struct(target_itspec, target_addr, 0);
7020     return 0;
7021 }
7022 
7023 static inline abi_long target_to_host_timex(struct timex *host_tx,
7024                                             abi_long target_addr)
7025 {
7026     struct target_timex *target_tx;
7027 
7028     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7029         return -TARGET_EFAULT;
7030     }
7031 
7032     __get_user(host_tx->modes, &target_tx->modes);
7033     __get_user(host_tx->offset, &target_tx->offset);
7034     __get_user(host_tx->freq, &target_tx->freq);
7035     __get_user(host_tx->maxerror, &target_tx->maxerror);
7036     __get_user(host_tx->esterror, &target_tx->esterror);
7037     __get_user(host_tx->status, &target_tx->status);
7038     __get_user(host_tx->constant, &target_tx->constant);
7039     __get_user(host_tx->precision, &target_tx->precision);
7040     __get_user(host_tx->tolerance, &target_tx->tolerance);
7041     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7042     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7043     __get_user(host_tx->tick, &target_tx->tick);
7044     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7045     __get_user(host_tx->jitter, &target_tx->jitter);
7046     __get_user(host_tx->shift, &target_tx->shift);
7047     __get_user(host_tx->stabil, &target_tx->stabil);
7048     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7049     __get_user(host_tx->calcnt, &target_tx->calcnt);
7050     __get_user(host_tx->errcnt, &target_tx->errcnt);
7051     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7052     __get_user(host_tx->tai, &target_tx->tai);
7053 
7054     unlock_user_struct(target_tx, target_addr, 0);
7055     return 0;
7056 }
7057 
7058 static inline abi_long host_to_target_timex(abi_long target_addr,
7059                                             struct timex *host_tx)
7060 {
7061     struct target_timex *target_tx;
7062 
7063     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7064         return -TARGET_EFAULT;
7065     }
7066 
7067     __put_user(host_tx->modes, &target_tx->modes);
7068     __put_user(host_tx->offset, &target_tx->offset);
7069     __put_user(host_tx->freq, &target_tx->freq);
7070     __put_user(host_tx->maxerror, &target_tx->maxerror);
7071     __put_user(host_tx->esterror, &target_tx->esterror);
7072     __put_user(host_tx->status, &target_tx->status);
7073     __put_user(host_tx->constant, &target_tx->constant);
7074     __put_user(host_tx->precision, &target_tx->precision);
7075     __put_user(host_tx->tolerance, &target_tx->tolerance);
7076     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7077     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7078     __put_user(host_tx->tick, &target_tx->tick);
7079     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7080     __put_user(host_tx->jitter, &target_tx->jitter);
7081     __put_user(host_tx->shift, &target_tx->shift);
7082     __put_user(host_tx->stabil, &target_tx->stabil);
7083     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7084     __put_user(host_tx->calcnt, &target_tx->calcnt);
7085     __put_user(host_tx->errcnt, &target_tx->errcnt);
7086     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7087     __put_user(host_tx->tai, &target_tx->tai);
7088 
7089     unlock_user_struct(target_tx, target_addr, 1);
7090     return 0;
7091 }
7092 
7093 
7094 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7095                                                abi_ulong target_addr)
7096 {
7097     struct target_sigevent *target_sevp;
7098 
7099     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7100         return -TARGET_EFAULT;
7101     }
7102 
7103     /* This union is awkward on 64 bit systems because it has a 32 bit
7104      * integer and a pointer in it; we follow the conversion approach
7105      * used for handling sigval types in signal.c so the guest should get
7106      * the correct value back even if we did a 64 bit byteswap and it's
7107      * using the 32 bit integer.
7108      */
7109     host_sevp->sigev_value.sival_ptr =
7110         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7111     host_sevp->sigev_signo =
7112         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7113     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7114     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7115 
7116     unlock_user_struct(target_sevp, target_addr, 1);
7117     return 0;
7118 }
7119 
7120 #if defined(TARGET_NR_mlockall)
7121 static inline int target_to_host_mlockall_arg(int arg)
7122 {
7123     int result = 0;
7124 
7125     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7126         result |= MCL_CURRENT;
7127     }
7128     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7129         result |= MCL_FUTURE;
7130     }
7131     return result;
7132 }
7133 #endif
7134 
7135 static inline abi_long host_to_target_stat64(void *cpu_env,
7136                                              abi_ulong target_addr,
7137                                              struct stat *host_st)
7138 {
7139 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7140     if (((CPUARMState *)cpu_env)->eabi) {
7141         struct target_eabi_stat64 *target_st;
7142 
7143         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7144             return -TARGET_EFAULT;
7145         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7146         __put_user(host_st->st_dev, &target_st->st_dev);
7147         __put_user(host_st->st_ino, &target_st->st_ino);
7148 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7149         __put_user(host_st->st_ino, &target_st->__st_ino);
7150 #endif
7151         __put_user(host_st->st_mode, &target_st->st_mode);
7152         __put_user(host_st->st_nlink, &target_st->st_nlink);
7153         __put_user(host_st->st_uid, &target_st->st_uid);
7154         __put_user(host_st->st_gid, &target_st->st_gid);
7155         __put_user(host_st->st_rdev, &target_st->st_rdev);
7156         __put_user(host_st->st_size, &target_st->st_size);
7157         __put_user(host_st->st_blksize, &target_st->st_blksize);
7158         __put_user(host_st->st_blocks, &target_st->st_blocks);
7159         __put_user(host_st->st_atime, &target_st->target_st_atime);
7160         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7161         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7162         unlock_user_struct(target_st, target_addr, 1);
7163     } else
7164 #endif
7165     {
7166 #if defined(TARGET_HAS_STRUCT_STAT64)
7167         struct target_stat64 *target_st;
7168 #else
7169         struct target_stat *target_st;
7170 #endif
7171 
7172         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7173             return -TARGET_EFAULT;
7174         memset(target_st, 0, sizeof(*target_st));
7175         __put_user(host_st->st_dev, &target_st->st_dev);
7176         __put_user(host_st->st_ino, &target_st->st_ino);
7177 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7178         __put_user(host_st->st_ino, &target_st->__st_ino);
7179 #endif
7180         __put_user(host_st->st_mode, &target_st->st_mode);
7181         __put_user(host_st->st_nlink, &target_st->st_nlink);
7182         __put_user(host_st->st_uid, &target_st->st_uid);
7183         __put_user(host_st->st_gid, &target_st->st_gid);
7184         __put_user(host_st->st_rdev, &target_st->st_rdev);
7185         /* XXX: better use of kernel struct */
7186         __put_user(host_st->st_size, &target_st->st_size);
7187         __put_user(host_st->st_blksize, &target_st->st_blksize);
7188         __put_user(host_st->st_blocks, &target_st->st_blocks);
7189         __put_user(host_st->st_atime, &target_st->target_st_atime);
7190         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7191         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7192         unlock_user_struct(target_st, target_addr, 1);
7193     }
7194 
7195     return 0;
7196 }
7197 
7198 /* ??? Using host futex calls even when target atomic operations
7199    are not really atomic probably breaks things.  However implementing
7200    futexes locally would make futexes shared between multiple processes
7201    tricky.  However they're probably useless because guest atomic
7202    operations won't work either.  */
7203 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7204                     target_ulong uaddr2, int val3)
7205 {
7206     struct timespec ts, *pts;
7207     int base_op;
7208 
7209     /* ??? We assume FUTEX_* constants are the same on both host
7210        and target.  */
7211 #ifdef FUTEX_CMD_MASK
7212     base_op = op & FUTEX_CMD_MASK;
7213 #else
7214     base_op = op;
7215 #endif
7216     switch (base_op) {
7217     case FUTEX_WAIT:
7218     case FUTEX_WAIT_BITSET:
7219         if (timeout) {
7220             pts = &ts;
7221             target_to_host_timespec(pts, timeout);
7222         } else {
7223             pts = NULL;
7224         }
7225         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7226                          pts, NULL, val3));
7227     case FUTEX_WAKE:
7228         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7229     case FUTEX_FD:
7230         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7231     case FUTEX_REQUEUE:
7232     case FUTEX_CMP_REQUEUE:
7233     case FUTEX_WAKE_OP:
7234         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7235            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7236            But the prototype takes a `struct timespec *'; insert casts
7237            to satisfy the compiler.  We do not need to tswap TIMEOUT
7238            since it's not compared to guest memory.  */
7239         pts = (struct timespec *)(uintptr_t) timeout;
7240         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7241                                     g2h(uaddr2),
7242                                     (base_op == FUTEX_CMP_REQUEUE
7243                                      ? tswap32(val3)
7244                                      : val3)));
7245     default:
7246         return -TARGET_ENOSYS;
7247     }
7248 }
7249 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7250 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7251                                      abi_long handle, abi_long mount_id,
7252                                      abi_long flags)
7253 {
7254     struct file_handle *target_fh;
7255     struct file_handle *fh;
7256     int mid = 0;
7257     abi_long ret;
7258     char *name;
7259     unsigned int size, total_size;
7260 
7261     if (get_user_s32(size, handle)) {
7262         return -TARGET_EFAULT;
7263     }
7264 
7265     name = lock_user_string(pathname);
7266     if (!name) {
7267         return -TARGET_EFAULT;
7268     }
7269 
7270     total_size = sizeof(struct file_handle) + size;
7271     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7272     if (!target_fh) {
7273         unlock_user(name, pathname, 0);
7274         return -TARGET_EFAULT;
7275     }
7276 
7277     fh = g_malloc0(total_size);
7278     fh->handle_bytes = size;
7279 
7280     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7281     unlock_user(name, pathname, 0);
7282 
7283     /* man name_to_handle_at(2):
7284      * Other than the use of the handle_bytes field, the caller should treat
7285      * the file_handle structure as an opaque data type
7286      */
7287 
7288     memcpy(target_fh, fh, total_size);
7289     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7290     target_fh->handle_type = tswap32(fh->handle_type);
7291     g_free(fh);
7292     unlock_user(target_fh, handle, total_size);
7293 
7294     if (put_user_s32(mid, mount_id)) {
7295         return -TARGET_EFAULT;
7296     }
7297 
7298     return ret;
7299 
7300 }
7301 #endif
7302 
7303 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7304 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7305                                      abi_long flags)
7306 {
7307     struct file_handle *target_fh;
7308     struct file_handle *fh;
7309     unsigned int size, total_size;
7310     abi_long ret;
7311 
7312     if (get_user_s32(size, handle)) {
7313         return -TARGET_EFAULT;
7314     }
7315 
7316     total_size = sizeof(struct file_handle) + size;
7317     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7318     if (!target_fh) {
7319         return -TARGET_EFAULT;
7320     }
7321 
7322     fh = g_memdup(target_fh, total_size);
7323     fh->handle_bytes = size;
7324     fh->handle_type = tswap32(target_fh->handle_type);
7325 
7326     ret = get_errno(open_by_handle_at(mount_fd, fh,
7327                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7328 
7329     g_free(fh);
7330 
7331     unlock_user(target_fh, handle, total_size);
7332 
7333     return ret;
7334 }
7335 #endif
7336 
7337 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7338 
7339 /* signalfd siginfo conversion */
7340 
7341 static void
7342 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7343                                 const struct signalfd_siginfo *info)
7344 {
7345     int sig = host_to_target_signal(info->ssi_signo);
7346 
7347     /* linux/signalfd.h defines a ssi_addr_lsb
7348      * not defined in sys/signalfd.h but used by some kernels
7349      */
7350 
7351 #ifdef BUS_MCEERR_AO
7352     if (tinfo->ssi_signo == SIGBUS &&
7353         (tinfo->ssi_code == BUS_MCEERR_AR ||
7354          tinfo->ssi_code == BUS_MCEERR_AO)) {
7355         uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7356         uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7357         *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7358     }
7359 #endif
7360 
7361     tinfo->ssi_signo = tswap32(sig);
7362     tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7363     tinfo->ssi_code = tswap32(info->ssi_code);
7364     tinfo->ssi_pid = tswap32(info->ssi_pid);
7365     tinfo->ssi_uid = tswap32(info->ssi_uid);
7366     tinfo->ssi_fd = tswap32(info->ssi_fd);
7367     tinfo->ssi_tid = tswap32(info->ssi_tid);
7368     tinfo->ssi_band = tswap32(info->ssi_band);
7369     tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7370     tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7371     tinfo->ssi_status = tswap32(info->ssi_status);
7372     tinfo->ssi_int = tswap32(info->ssi_int);
7373     tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7374     tinfo->ssi_utime = tswap64(info->ssi_utime);
7375     tinfo->ssi_stime = tswap64(info->ssi_stime);
7376     tinfo->ssi_addr = tswap64(info->ssi_addr);
7377 }
7378 
7379 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7380 {
7381     int i;
7382 
7383     for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7384         host_to_target_signalfd_siginfo(buf + i, buf + i);
7385     }
7386 
7387     return len;
7388 }
7389 
7390 static TargetFdTrans target_signalfd_trans = {
7391     .host_to_target_data = host_to_target_data_signalfd,
7392 };
7393 
7394 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7395 {
7396     int host_flags;
7397     target_sigset_t *target_mask;
7398     sigset_t host_mask;
7399     abi_long ret;
7400 
7401     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7402         return -TARGET_EINVAL;
7403     }
7404     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7405         return -TARGET_EFAULT;
7406     }
7407 
7408     target_to_host_sigset(&host_mask, target_mask);
7409 
7410     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7411 
7412     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7413     if (ret >= 0) {
7414         fd_trans_register(ret, &target_signalfd_trans);
7415     }
7416 
7417     unlock_user_struct(target_mask, mask, 0);
7418 
7419     return ret;
7420 }
7421 #endif
7422 
7423 /* Map host to target signal numbers for the wait family of syscalls.
7424    Assume all other status bits are the same.  */
7425 int host_to_target_waitstatus(int status)
7426 {
7427     if (WIFSIGNALED(status)) {
7428         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7429     }
7430     if (WIFSTOPPED(status)) {
7431         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7432                | (status & 0xff);
7433     }
7434     return status;
7435 }
7436 
7437 static int open_self_cmdline(void *cpu_env, int fd)
7438 {
7439     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7440     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7441     int i;
7442 
7443     for (i = 0; i < bprm->argc; i++) {
7444         size_t len = strlen(bprm->argv[i]) + 1;
7445 
7446         if (write(fd, bprm->argv[i], len) != len) {
7447             return -1;
7448         }
7449     }
7450 
7451     return 0;
7452 }
7453 
7454 static int open_self_maps(void *cpu_env, int fd)
7455 {
7456     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7457     TaskState *ts = cpu->opaque;
7458     FILE *fp;
7459     char *line = NULL;
7460     size_t len = 0;
7461     ssize_t read;
7462 
7463     fp = fopen("/proc/self/maps", "r");
7464     if (fp == NULL) {
7465         return -1;
7466     }
7467 
7468     while ((read = getline(&line, &len, fp)) != -1) {
7469         int fields, dev_maj, dev_min, inode;
7470         uint64_t min, max, offset;
7471         char flag_r, flag_w, flag_x, flag_p;
7472         char path[512] = "";
7473         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7474                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7475                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7476 
7477         if ((fields < 10) || (fields > 11)) {
7478             continue;
7479         }
7480         if (h2g_valid(min)) {
7481             int flags = page_get_flags(h2g(min));
7482             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7483             if (page_check_range(h2g(min), max - min, flags) == -1) {
7484                 continue;
7485             }
7486             if (h2g(min) == ts->info->stack_limit) {
7487                 pstrcpy(path, sizeof(path), "      [stack]");
7488             }
7489             dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7490                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7491                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7492                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7493                     path[0] ? "         " : "", path);
7494         }
7495     }
7496 
7497     free(line);
7498     fclose(fp);
7499 
7500     return 0;
7501 }
7502 
7503 static int open_self_stat(void *cpu_env, int fd)
7504 {
7505     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7506     TaskState *ts = cpu->opaque;
7507     abi_ulong start_stack = ts->info->start_stack;
7508     int i;
7509 
7510     for (i = 0; i < 44; i++) {
7511       char buf[128];
7512       int len;
7513       uint64_t val = 0;
7514 
7515       if (i == 0) {
7516         /* pid */
7517         val = getpid();
7518         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7519       } else if (i == 1) {
7520         /* app name */
7521         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7522       } else if (i == 27) {
7523         /* stack bottom */
7524         val = start_stack;
7525         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7526       } else {
7527         /* for the rest, there is MasterCard */
7528         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7529       }
7530 
7531       len = strlen(buf);
7532       if (write(fd, buf, len) != len) {
7533           return -1;
7534       }
7535     }
7536 
7537     return 0;
7538 }
7539 
7540 static int open_self_auxv(void *cpu_env, int fd)
7541 {
7542     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7543     TaskState *ts = cpu->opaque;
7544     abi_ulong auxv = ts->info->saved_auxv;
7545     abi_ulong len = ts->info->auxv_len;
7546     char *ptr;
7547 
7548     /*
7549      * Auxiliary vector is stored in target process stack.
7550      * read in whole auxv vector and copy it to file
7551      */
7552     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7553     if (ptr != NULL) {
7554         while (len > 0) {
7555             ssize_t r;
7556             r = write(fd, ptr, len);
7557             if (r <= 0) {
7558                 break;
7559             }
7560             len -= r;
7561             ptr += r;
7562         }
7563         lseek(fd, 0, SEEK_SET);
7564         unlock_user(ptr, auxv, len);
7565     }
7566 
7567     return 0;
7568 }
7569 
7570 static int is_proc_myself(const char *filename, const char *entry)
7571 {
7572     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7573         filename += strlen("/proc/");
7574         if (!strncmp(filename, "self/", strlen("self/"))) {
7575             filename += strlen("self/");
7576         } else if (*filename >= '1' && *filename <= '9') {
7577             char myself[80];
7578             snprintf(myself, sizeof(myself), "%d/", getpid());
7579             if (!strncmp(filename, myself, strlen(myself))) {
7580                 filename += strlen(myself);
7581             } else {
7582                 return 0;
7583             }
7584         } else {
7585             return 0;
7586         }
7587         if (!strcmp(filename, entry)) {
7588             return 1;
7589         }
7590     }
7591     return 0;
7592 }
7593 
7594 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7595 static int is_proc(const char *filename, const char *entry)
7596 {
7597     return strcmp(filename, entry) == 0;
7598 }
7599 
7600 static int open_net_route(void *cpu_env, int fd)
7601 {
7602     FILE *fp;
7603     char *line = NULL;
7604     size_t len = 0;
7605     ssize_t read;
7606 
7607     fp = fopen("/proc/net/route", "r");
7608     if (fp == NULL) {
7609         return -1;
7610     }
7611 
7612     /* read header */
7613 
7614     read = getline(&line, &len, fp);
7615     dprintf(fd, "%s", line);
7616 
7617     /* read routes */
7618 
7619     while ((read = getline(&line, &len, fp)) != -1) {
7620         char iface[16];
7621         uint32_t dest, gw, mask;
7622         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7623         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7624                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7625                      &mask, &mtu, &window, &irtt);
7626         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7627                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7628                 metric, tswap32(mask), mtu, window, irtt);
7629     }
7630 
7631     free(line);
7632     fclose(fp);
7633 
7634     return 0;
7635 }
7636 #endif
7637 
7638 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7639 {
7640     struct fake_open {
7641         const char *filename;
7642         int (*fill)(void *cpu_env, int fd);
7643         int (*cmp)(const char *s1, const char *s2);
7644     };
7645     const struct fake_open *fake_open;
7646     static const struct fake_open fakes[] = {
7647         { "maps", open_self_maps, is_proc_myself },
7648         { "stat", open_self_stat, is_proc_myself },
7649         { "auxv", open_self_auxv, is_proc_myself },
7650         { "cmdline", open_self_cmdline, is_proc_myself },
7651 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7652         { "/proc/net/route", open_net_route, is_proc },
7653 #endif
7654         { NULL, NULL, NULL }
7655     };
7656 
7657     if (is_proc_myself(pathname, "exe")) {
7658         int execfd = qemu_getauxval(AT_EXECFD);
7659         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7660     }
7661 
7662     for (fake_open = fakes; fake_open->filename; fake_open++) {
7663         if (fake_open->cmp(pathname, fake_open->filename)) {
7664             break;
7665         }
7666     }
7667 
7668     if (fake_open->filename) {
7669         const char *tmpdir;
7670         char filename[PATH_MAX];
7671         int fd, r;
7672 
7673         /* create temporary file to map stat to */
7674         tmpdir = getenv("TMPDIR");
7675         if (!tmpdir)
7676             tmpdir = "/tmp";
7677         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7678         fd = mkstemp(filename);
7679         if (fd < 0) {
7680             return fd;
7681         }
7682         unlink(filename);
7683 
7684         if ((r = fake_open->fill(cpu_env, fd))) {
7685             int e = errno;
7686             close(fd);
7687             errno = e;
7688             return r;
7689         }
7690         lseek(fd, 0, SEEK_SET);
7691 
7692         return fd;
7693     }
7694 
7695     return safe_openat(dirfd, path(pathname), flags, mode);
7696 }
7697 
7698 #define TIMER_MAGIC 0x0caf0000
7699 #define TIMER_MAGIC_MASK 0xffff0000
7700 
7701 /* Convert QEMU provided timer ID back to internal 16bit index format */
7702 static target_timer_t get_timer_id(abi_long arg)
7703 {
7704     target_timer_t timerid = arg;
7705 
7706     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7707         return -TARGET_EINVAL;
7708     }
7709 
7710     timerid &= 0xffff;
7711 
7712     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7713         return -TARGET_EINVAL;
7714     }
7715 
7716     return timerid;
7717 }
7718 
7719 static abi_long swap_data_eventfd(void *buf, size_t len)
7720 {
7721     uint64_t *counter = buf;
7722     int i;
7723 
7724     if (len < sizeof(uint64_t)) {
7725         return -EINVAL;
7726     }
7727 
7728     for (i = 0; i < len; i += sizeof(uint64_t)) {
7729         *counter = tswap64(*counter);
7730         counter++;
7731     }
7732 
7733     return len;
7734 }
7735 
7736 static TargetFdTrans target_eventfd_trans = {
7737     .host_to_target_data = swap_data_eventfd,
7738     .target_to_host_data = swap_data_eventfd,
7739 };
7740 
7741 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7742     (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7743      defined(__NR_inotify_init1))
7744 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7745 {
7746     struct inotify_event *ev;
7747     int i;
7748     uint32_t name_len;
7749 
7750     for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7751         ev = (struct inotify_event *)((char *)buf + i);
7752         name_len = ev->len;
7753 
7754         ev->wd = tswap32(ev->wd);
7755         ev->mask = tswap32(ev->mask);
7756         ev->cookie = tswap32(ev->cookie);
7757         ev->len = tswap32(name_len);
7758     }
7759 
7760     return len;
7761 }
7762 
7763 static TargetFdTrans target_inotify_trans = {
7764     .host_to_target_data = host_to_target_data_inotify,
7765 };
7766 #endif
7767 
7768 static int target_to_host_cpu_mask(unsigned long *host_mask,
7769                                    size_t host_size,
7770                                    abi_ulong target_addr,
7771                                    size_t target_size)
7772 {
7773     unsigned target_bits = sizeof(abi_ulong) * 8;
7774     unsigned host_bits = sizeof(*host_mask) * 8;
7775     abi_ulong *target_mask;
7776     unsigned i, j;
7777 
7778     assert(host_size >= target_size);
7779 
7780     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7781     if (!target_mask) {
7782         return -TARGET_EFAULT;
7783     }
7784     memset(host_mask, 0, host_size);
7785 
7786     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7787         unsigned bit = i * target_bits;
7788         abi_ulong val;
7789 
7790         __get_user(val, &target_mask[i]);
7791         for (j = 0; j < target_bits; j++, bit++) {
7792             if (val & (1UL << j)) {
7793                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7794             }
7795         }
7796     }
7797 
7798     unlock_user(target_mask, target_addr, 0);
7799     return 0;
7800 }
7801 
7802 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7803                                    size_t host_size,
7804                                    abi_ulong target_addr,
7805                                    size_t target_size)
7806 {
7807     unsigned target_bits = sizeof(abi_ulong) * 8;
7808     unsigned host_bits = sizeof(*host_mask) * 8;
7809     abi_ulong *target_mask;
7810     unsigned i, j;
7811 
7812     assert(host_size >= target_size);
7813 
7814     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7815     if (!target_mask) {
7816         return -TARGET_EFAULT;
7817     }
7818 
7819     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7820         unsigned bit = i * target_bits;
7821         abi_ulong val = 0;
7822 
7823         for (j = 0; j < target_bits; j++, bit++) {
7824             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7825                 val |= 1UL << j;
7826             }
7827         }
7828         __put_user(val, &target_mask[i]);
7829     }
7830 
7831     unlock_user(target_mask, target_addr, target_size);
7832     return 0;
7833 }
7834 
7835 /* do_syscall() should always have a single exit point at the end so
7836    that actions, such as logging of syscall results, can be performed.
7837    All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7838 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7839                     abi_long arg2, abi_long arg3, abi_long arg4,
7840                     abi_long arg5, abi_long arg6, abi_long arg7,
7841                     abi_long arg8)
7842 {
7843     CPUState *cpu = ENV_GET_CPU(cpu_env);
7844     abi_long ret;
7845     struct stat st;
7846     struct statfs stfs;
7847     void *p;
7848 
7849 #if defined(DEBUG_ERESTARTSYS)
7850     /* Debug-only code for exercising the syscall-restart code paths
7851      * in the per-architecture cpu main loops: restart every syscall
7852      * the guest makes once before letting it through.
7853      */
7854     {
7855         static int flag;
7856 
7857         flag = !flag;
7858         if (flag) {
7859             return -TARGET_ERESTARTSYS;
7860         }
7861     }
7862 #endif
7863 
7864 #ifdef DEBUG
7865     gemu_log("syscall %d", num);
7866 #endif
7867     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7868     if(do_strace)
7869         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7870 
7871     switch(num) {
7872     case TARGET_NR_exit:
7873         /* In old applications this may be used to implement _exit(2).
7874            However in threaded applictions it is used for thread termination,
7875            and _exit_group is used for application termination.
7876            Do thread termination if we have more then one thread.  */
7877 
7878         if (block_signals()) {
7879             ret = -TARGET_ERESTARTSYS;
7880             break;
7881         }
7882 
7883         cpu_list_lock();
7884 
7885         if (CPU_NEXT(first_cpu)) {
7886             TaskState *ts;
7887 
7888             /* Remove the CPU from the list.  */
7889             QTAILQ_REMOVE(&cpus, cpu, node);
7890 
7891             cpu_list_unlock();
7892 
7893             ts = cpu->opaque;
7894             if (ts->child_tidptr) {
7895                 put_user_u32(0, ts->child_tidptr);
7896                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7897                           NULL, NULL, 0);
7898             }
7899             thread_cpu = NULL;
7900             object_unref(OBJECT(cpu));
7901             g_free(ts);
7902             rcu_unregister_thread();
7903             pthread_exit(NULL);
7904         }
7905 
7906         cpu_list_unlock();
7907 #ifdef TARGET_GPROF
7908         _mcleanup();
7909 #endif
7910         gdb_exit(cpu_env, arg1);
7911         _exit(arg1);
7912         ret = 0; /* avoid warning */
7913         break;
7914     case TARGET_NR_read:
7915         if (arg3 == 0)
7916             ret = 0;
7917         else {
7918             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7919                 goto efault;
7920             ret = get_errno(safe_read(arg1, p, arg3));
7921             if (ret >= 0 &&
7922                 fd_trans_host_to_target_data(arg1)) {
7923                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7924             }
7925             unlock_user(p, arg2, ret);
7926         }
7927         break;
7928     case TARGET_NR_write:
7929         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7930             goto efault;
7931         if (fd_trans_target_to_host_data(arg1)) {
7932             void *copy = g_malloc(arg3);
7933             memcpy(copy, p, arg3);
7934             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7935             if (ret >= 0) {
7936                 ret = get_errno(safe_write(arg1, copy, ret));
7937             }
7938             g_free(copy);
7939         } else {
7940             ret = get_errno(safe_write(arg1, p, arg3));
7941         }
7942         unlock_user(p, arg2, 0);
7943         break;
7944 #ifdef TARGET_NR_open
7945     case TARGET_NR_open:
7946         if (!(p = lock_user_string(arg1)))
7947             goto efault;
7948         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7949                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7950                                   arg3));
7951         fd_trans_unregister(ret);
7952         unlock_user(p, arg1, 0);
7953         break;
7954 #endif
7955     case TARGET_NR_openat:
7956         if (!(p = lock_user_string(arg2)))
7957             goto efault;
7958         ret = get_errno(do_openat(cpu_env, arg1, p,
7959                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7960                                   arg4));
7961         fd_trans_unregister(ret);
7962         unlock_user(p, arg2, 0);
7963         break;
7964 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7965     case TARGET_NR_name_to_handle_at:
7966         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7967         break;
7968 #endif
7969 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7970     case TARGET_NR_open_by_handle_at:
7971         ret = do_open_by_handle_at(arg1, arg2, arg3);
7972         fd_trans_unregister(ret);
7973         break;
7974 #endif
7975     case TARGET_NR_close:
7976         fd_trans_unregister(arg1);
7977         ret = get_errno(close(arg1));
7978         break;
7979     case TARGET_NR_brk:
7980         ret = do_brk(arg1);
7981         break;
7982 #ifdef TARGET_NR_fork
7983     case TARGET_NR_fork:
7984         ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7985         break;
7986 #endif
7987 #ifdef TARGET_NR_waitpid
7988     case TARGET_NR_waitpid:
7989         {
7990             int status;
7991             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7992             if (!is_error(ret) && arg2 && ret
7993                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7994                 goto efault;
7995         }
7996         break;
7997 #endif
7998 #ifdef TARGET_NR_waitid
7999     case TARGET_NR_waitid:
8000         {
8001             siginfo_t info;
8002             info.si_pid = 0;
8003             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8004             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8005                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8006                     goto efault;
8007                 host_to_target_siginfo(p, &info);
8008                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8009             }
8010         }
8011         break;
8012 #endif
8013 #ifdef TARGET_NR_creat /* not on alpha */
8014     case TARGET_NR_creat:
8015         if (!(p = lock_user_string(arg1)))
8016             goto efault;
8017         ret = get_errno(creat(p, arg2));
8018         fd_trans_unregister(ret);
8019         unlock_user(p, arg1, 0);
8020         break;
8021 #endif
8022 #ifdef TARGET_NR_link
8023     case TARGET_NR_link:
8024         {
8025             void * p2;
8026             p = lock_user_string(arg1);
8027             p2 = lock_user_string(arg2);
8028             if (!p || !p2)
8029                 ret = -TARGET_EFAULT;
8030             else
8031                 ret = get_errno(link(p, p2));
8032             unlock_user(p2, arg2, 0);
8033             unlock_user(p, arg1, 0);
8034         }
8035         break;
8036 #endif
8037 #if defined(TARGET_NR_linkat)
8038     case TARGET_NR_linkat:
8039         {
8040             void * p2 = NULL;
8041             if (!arg2 || !arg4)
8042                 goto efault;
8043             p  = lock_user_string(arg2);
8044             p2 = lock_user_string(arg4);
8045             if (!p || !p2)
8046                 ret = -TARGET_EFAULT;
8047             else
8048                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8049             unlock_user(p, arg2, 0);
8050             unlock_user(p2, arg4, 0);
8051         }
8052         break;
8053 #endif
8054 #ifdef TARGET_NR_unlink
8055     case TARGET_NR_unlink:
8056         if (!(p = lock_user_string(arg1)))
8057             goto efault;
8058         ret = get_errno(unlink(p));
8059         unlock_user(p, arg1, 0);
8060         break;
8061 #endif
8062 #if defined(TARGET_NR_unlinkat)
8063     case TARGET_NR_unlinkat:
8064         if (!(p = lock_user_string(arg2)))
8065             goto efault;
8066         ret = get_errno(unlinkat(arg1, p, arg3));
8067         unlock_user(p, arg2, 0);
8068         break;
8069 #endif
8070     case TARGET_NR_execve:
8071         {
8072             char **argp, **envp;
8073             int argc, envc;
8074             abi_ulong gp;
8075             abi_ulong guest_argp;
8076             abi_ulong guest_envp;
8077             abi_ulong addr;
8078             char **q;
8079             int total_size = 0;
8080 
8081             argc = 0;
8082             guest_argp = arg2;
8083             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8084                 if (get_user_ual(addr, gp))
8085                     goto efault;
8086                 if (!addr)
8087                     break;
8088                 argc++;
8089             }
8090             envc = 0;
8091             guest_envp = arg3;
8092             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8093                 if (get_user_ual(addr, gp))
8094                     goto efault;
8095                 if (!addr)
8096                     break;
8097                 envc++;
8098             }
8099 
8100             argp = g_new0(char *, argc + 1);
8101             envp = g_new0(char *, envc + 1);
8102 
8103             for (gp = guest_argp, q = argp; gp;
8104                   gp += sizeof(abi_ulong), q++) {
8105                 if (get_user_ual(addr, gp))
8106                     goto execve_efault;
8107                 if (!addr)
8108                     break;
8109                 if (!(*q = lock_user_string(addr)))
8110                     goto execve_efault;
8111                 total_size += strlen(*q) + 1;
8112             }
8113             *q = NULL;
8114 
8115             for (gp = guest_envp, q = envp; gp;
8116                   gp += sizeof(abi_ulong), q++) {
8117                 if (get_user_ual(addr, gp))
8118                     goto execve_efault;
8119                 if (!addr)
8120                     break;
8121                 if (!(*q = lock_user_string(addr)))
8122                     goto execve_efault;
8123                 total_size += strlen(*q) + 1;
8124             }
8125             *q = NULL;
8126 
8127             if (!(p = lock_user_string(arg1)))
8128                 goto execve_efault;
8129             /* Although execve() is not an interruptible syscall it is
8130              * a special case where we must use the safe_syscall wrapper:
8131              * if we allow a signal to happen before we make the host
8132              * syscall then we will 'lose' it, because at the point of
8133              * execve the process leaves QEMU's control. So we use the
8134              * safe syscall wrapper to ensure that we either take the
8135              * signal as a guest signal, or else it does not happen
8136              * before the execve completes and makes it the other
8137              * program's problem.
8138              */
8139             ret = get_errno(safe_execve(p, argp, envp));
8140             unlock_user(p, arg1, 0);
8141 
8142             goto execve_end;
8143 
8144         execve_efault:
8145             ret = -TARGET_EFAULT;
8146 
8147         execve_end:
8148             for (gp = guest_argp, q = argp; *q;
8149                   gp += sizeof(abi_ulong), q++) {
8150                 if (get_user_ual(addr, gp)
8151                     || !addr)
8152                     break;
8153                 unlock_user(*q, addr, 0);
8154             }
8155             for (gp = guest_envp, q = envp; *q;
8156                   gp += sizeof(abi_ulong), q++) {
8157                 if (get_user_ual(addr, gp)
8158                     || !addr)
8159                     break;
8160                 unlock_user(*q, addr, 0);
8161             }
8162 
8163             g_free(argp);
8164             g_free(envp);
8165         }
8166         break;
8167     case TARGET_NR_chdir:
8168         if (!(p = lock_user_string(arg1)))
8169             goto efault;
8170         ret = get_errno(chdir(p));
8171         unlock_user(p, arg1, 0);
8172         break;
8173 #ifdef TARGET_NR_time
8174     case TARGET_NR_time:
8175         {
8176             time_t host_time;
8177             ret = get_errno(time(&host_time));
8178             if (!is_error(ret)
8179                 && arg1
8180                 && put_user_sal(host_time, arg1))
8181                 goto efault;
8182         }
8183         break;
8184 #endif
8185 #ifdef TARGET_NR_mknod
8186     case TARGET_NR_mknod:
8187         if (!(p = lock_user_string(arg1)))
8188             goto efault;
8189         ret = get_errno(mknod(p, arg2, arg3));
8190         unlock_user(p, arg1, 0);
8191         break;
8192 #endif
8193 #if defined(TARGET_NR_mknodat)
8194     case TARGET_NR_mknodat:
8195         if (!(p = lock_user_string(arg2)))
8196             goto efault;
8197         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8198         unlock_user(p, arg2, 0);
8199         break;
8200 #endif
8201 #ifdef TARGET_NR_chmod
8202     case TARGET_NR_chmod:
8203         if (!(p = lock_user_string(arg1)))
8204             goto efault;
8205         ret = get_errno(chmod(p, arg2));
8206         unlock_user(p, arg1, 0);
8207         break;
8208 #endif
8209 #ifdef TARGET_NR_break
8210     case TARGET_NR_break:
8211         goto unimplemented;
8212 #endif
8213 #ifdef TARGET_NR_oldstat
8214     case TARGET_NR_oldstat:
8215         goto unimplemented;
8216 #endif
8217     case TARGET_NR_lseek:
8218         ret = get_errno(lseek(arg1, arg2, arg3));
8219         break;
8220 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8221     /* Alpha specific */
8222     case TARGET_NR_getxpid:
8223         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8224         ret = get_errno(getpid());
8225         break;
8226 #endif
8227 #ifdef TARGET_NR_getpid
8228     case TARGET_NR_getpid:
8229         ret = get_errno(getpid());
8230         break;
8231 #endif
8232     case TARGET_NR_mount:
8233         {
8234             /* need to look at the data field */
8235             void *p2, *p3;
8236 
8237             if (arg1) {
8238                 p = lock_user_string(arg1);
8239                 if (!p) {
8240                     goto efault;
8241                 }
8242             } else {
8243                 p = NULL;
8244             }
8245 
8246             p2 = lock_user_string(arg2);
8247             if (!p2) {
8248                 if (arg1) {
8249                     unlock_user(p, arg1, 0);
8250                 }
8251                 goto efault;
8252             }
8253 
8254             if (arg3) {
8255                 p3 = lock_user_string(arg3);
8256                 if (!p3) {
8257                     if (arg1) {
8258                         unlock_user(p, arg1, 0);
8259                     }
8260                     unlock_user(p2, arg2, 0);
8261                     goto efault;
8262                 }
8263             } else {
8264                 p3 = NULL;
8265             }
8266 
8267             /* FIXME - arg5 should be locked, but it isn't clear how to
8268              * do that since it's not guaranteed to be a NULL-terminated
8269              * string.
8270              */
8271             if (!arg5) {
8272                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8273             } else {
8274                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8275             }
8276             ret = get_errno(ret);
8277 
8278             if (arg1) {
8279                 unlock_user(p, arg1, 0);
8280             }
8281             unlock_user(p2, arg2, 0);
8282             if (arg3) {
8283                 unlock_user(p3, arg3, 0);
8284             }
8285         }
8286         break;
8287 #ifdef TARGET_NR_umount
8288     case TARGET_NR_umount:
8289         if (!(p = lock_user_string(arg1)))
8290             goto efault;
8291         ret = get_errno(umount(p));
8292         unlock_user(p, arg1, 0);
8293         break;
8294 #endif
8295 #ifdef TARGET_NR_stime /* not on alpha */
8296     case TARGET_NR_stime:
8297         {
8298             time_t host_time;
8299             if (get_user_sal(host_time, arg1))
8300                 goto efault;
8301             ret = get_errno(stime(&host_time));
8302         }
8303         break;
8304 #endif
8305     case TARGET_NR_ptrace:
8306         goto unimplemented;
8307 #ifdef TARGET_NR_alarm /* not on alpha */
8308     case TARGET_NR_alarm:
8309         ret = alarm(arg1);
8310         break;
8311 #endif
8312 #ifdef TARGET_NR_oldfstat
8313     case TARGET_NR_oldfstat:
8314         goto unimplemented;
8315 #endif
8316 #ifdef TARGET_NR_pause /* not on alpha */
8317     case TARGET_NR_pause:
8318         if (!block_signals()) {
8319             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8320         }
8321         ret = -TARGET_EINTR;
8322         break;
8323 #endif
8324 #ifdef TARGET_NR_utime
8325     case TARGET_NR_utime:
8326         {
8327             struct utimbuf tbuf, *host_tbuf;
8328             struct target_utimbuf *target_tbuf;
8329             if (arg2) {
8330                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8331                     goto efault;
8332                 tbuf.actime = tswapal(target_tbuf->actime);
8333                 tbuf.modtime = tswapal(target_tbuf->modtime);
8334                 unlock_user_struct(target_tbuf, arg2, 0);
8335                 host_tbuf = &tbuf;
8336             } else {
8337                 host_tbuf = NULL;
8338             }
8339             if (!(p = lock_user_string(arg1)))
8340                 goto efault;
8341             ret = get_errno(utime(p, host_tbuf));
8342             unlock_user(p, arg1, 0);
8343         }
8344         break;
8345 #endif
8346 #ifdef TARGET_NR_utimes
8347     case TARGET_NR_utimes:
8348         {
8349             struct timeval *tvp, tv[2];
8350             if (arg2) {
8351                 if (copy_from_user_timeval(&tv[0], arg2)
8352                     || copy_from_user_timeval(&tv[1],
8353                                               arg2 + sizeof(struct target_timeval)))
8354                     goto efault;
8355                 tvp = tv;
8356             } else {
8357                 tvp = NULL;
8358             }
8359             if (!(p = lock_user_string(arg1)))
8360                 goto efault;
8361             ret = get_errno(utimes(p, tvp));
8362             unlock_user(p, arg1, 0);
8363         }
8364         break;
8365 #endif
8366 #if defined(TARGET_NR_futimesat)
8367     case TARGET_NR_futimesat:
8368         {
8369             struct timeval *tvp, tv[2];
8370             if (arg3) {
8371                 if (copy_from_user_timeval(&tv[0], arg3)
8372                     || copy_from_user_timeval(&tv[1],
8373                                               arg3 + sizeof(struct target_timeval)))
8374                     goto efault;
8375                 tvp = tv;
8376             } else {
8377                 tvp = NULL;
8378             }
8379             if (!(p = lock_user_string(arg2)))
8380                 goto efault;
8381             ret = get_errno(futimesat(arg1, path(p), tvp));
8382             unlock_user(p, arg2, 0);
8383         }
8384         break;
8385 #endif
8386 #ifdef TARGET_NR_stty
8387     case TARGET_NR_stty:
8388         goto unimplemented;
8389 #endif
8390 #ifdef TARGET_NR_gtty
8391     case TARGET_NR_gtty:
8392         goto unimplemented;
8393 #endif
8394 #ifdef TARGET_NR_access
8395     case TARGET_NR_access:
8396         if (!(p = lock_user_string(arg1)))
8397             goto efault;
8398         ret = get_errno(access(path(p), arg2));
8399         unlock_user(p, arg1, 0);
8400         break;
8401 #endif
8402 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8403     case TARGET_NR_faccessat:
8404         if (!(p = lock_user_string(arg2)))
8405             goto efault;
8406         ret = get_errno(faccessat(arg1, p, arg3, 0));
8407         unlock_user(p, arg2, 0);
8408         break;
8409 #endif
8410 #ifdef TARGET_NR_nice /* not on alpha */
8411     case TARGET_NR_nice:
8412         ret = get_errno(nice(arg1));
8413         break;
8414 #endif
8415 #ifdef TARGET_NR_ftime
8416     case TARGET_NR_ftime:
8417         goto unimplemented;
8418 #endif
8419     case TARGET_NR_sync:
8420         sync();
8421         ret = 0;
8422         break;
8423 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8424     case TARGET_NR_syncfs:
8425         ret = get_errno(syncfs(arg1));
8426         break;
8427 #endif
8428     case TARGET_NR_kill:
8429         ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8430         break;
8431 #ifdef TARGET_NR_rename
8432     case TARGET_NR_rename:
8433         {
8434             void *p2;
8435             p = lock_user_string(arg1);
8436             p2 = lock_user_string(arg2);
8437             if (!p || !p2)
8438                 ret = -TARGET_EFAULT;
8439             else
8440                 ret = get_errno(rename(p, p2));
8441             unlock_user(p2, arg2, 0);
8442             unlock_user(p, arg1, 0);
8443         }
8444         break;
8445 #endif
8446 #if defined(TARGET_NR_renameat)
8447     case TARGET_NR_renameat:
8448         {
8449             void *p2;
8450             p  = lock_user_string(arg2);
8451             p2 = lock_user_string(arg4);
8452             if (!p || !p2)
8453                 ret = -TARGET_EFAULT;
8454             else
8455                 ret = get_errno(renameat(arg1, p, arg3, p2));
8456             unlock_user(p2, arg4, 0);
8457             unlock_user(p, arg2, 0);
8458         }
8459         break;
8460 #endif
8461 #if defined(TARGET_NR_renameat2)
8462     case TARGET_NR_renameat2:
8463         {
8464             void *p2;
8465             p  = lock_user_string(arg2);
8466             p2 = lock_user_string(arg4);
8467             if (!p || !p2) {
8468                 ret = -TARGET_EFAULT;
8469             } else {
8470                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8471             }
8472             unlock_user(p2, arg4, 0);
8473             unlock_user(p, arg2, 0);
8474         }
8475         break;
8476 #endif
8477 #ifdef TARGET_NR_mkdir
8478     case TARGET_NR_mkdir:
8479         if (!(p = lock_user_string(arg1)))
8480             goto efault;
8481         ret = get_errno(mkdir(p, arg2));
8482         unlock_user(p, arg1, 0);
8483         break;
8484 #endif
8485 #if defined(TARGET_NR_mkdirat)
8486     case TARGET_NR_mkdirat:
8487         if (!(p = lock_user_string(arg2)))
8488             goto efault;
8489         ret = get_errno(mkdirat(arg1, p, arg3));
8490         unlock_user(p, arg2, 0);
8491         break;
8492 #endif
8493 #ifdef TARGET_NR_rmdir
8494     case TARGET_NR_rmdir:
8495         if (!(p = lock_user_string(arg1)))
8496             goto efault;
8497         ret = get_errno(rmdir(p));
8498         unlock_user(p, arg1, 0);
8499         break;
8500 #endif
8501     case TARGET_NR_dup:
8502         ret = get_errno(dup(arg1));
8503         if (ret >= 0) {
8504             fd_trans_dup(arg1, ret);
8505         }
8506         break;
8507 #ifdef TARGET_NR_pipe
8508     case TARGET_NR_pipe:
8509         ret = do_pipe(cpu_env, arg1, 0, 0);
8510         break;
8511 #endif
8512 #ifdef TARGET_NR_pipe2
8513     case TARGET_NR_pipe2:
8514         ret = do_pipe(cpu_env, arg1,
8515                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8516         break;
8517 #endif
8518     case TARGET_NR_times:
8519         {
8520             struct target_tms *tmsp;
8521             struct tms tms;
8522             ret = get_errno(times(&tms));
8523             if (arg1) {
8524                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8525                 if (!tmsp)
8526                     goto efault;
8527                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8528                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8529                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8530                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8531             }
8532             if (!is_error(ret))
8533                 ret = host_to_target_clock_t(ret);
8534         }
8535         break;
8536 #ifdef TARGET_NR_prof
8537     case TARGET_NR_prof:
8538         goto unimplemented;
8539 #endif
8540 #ifdef TARGET_NR_signal
8541     case TARGET_NR_signal:
8542         goto unimplemented;
8543 #endif
8544     case TARGET_NR_acct:
8545         if (arg1 == 0) {
8546             ret = get_errno(acct(NULL));
8547         } else {
8548             if (!(p = lock_user_string(arg1)))
8549                 goto efault;
8550             ret = get_errno(acct(path(p)));
8551             unlock_user(p, arg1, 0);
8552         }
8553         break;
8554 #ifdef TARGET_NR_umount2
8555     case TARGET_NR_umount2:
8556         if (!(p = lock_user_string(arg1)))
8557             goto efault;
8558         ret = get_errno(umount2(p, arg2));
8559         unlock_user(p, arg1, 0);
8560         break;
8561 #endif
8562 #ifdef TARGET_NR_lock
8563     case TARGET_NR_lock:
8564         goto unimplemented;
8565 #endif
8566     case TARGET_NR_ioctl:
8567         ret = do_ioctl(arg1, arg2, arg3);
8568         break;
8569 #ifdef TARGET_NR_fcntl
8570     case TARGET_NR_fcntl:
8571         ret = do_fcntl(arg1, arg2, arg3);
8572         break;
8573 #endif
8574 #ifdef TARGET_NR_mpx
8575     case TARGET_NR_mpx:
8576         goto unimplemented;
8577 #endif
8578     case TARGET_NR_setpgid:
8579         ret = get_errno(setpgid(arg1, arg2));
8580         break;
8581 #ifdef TARGET_NR_ulimit
8582     case TARGET_NR_ulimit:
8583         goto unimplemented;
8584 #endif
8585 #ifdef TARGET_NR_oldolduname
8586     case TARGET_NR_oldolduname:
8587         goto unimplemented;
8588 #endif
8589     case TARGET_NR_umask:
8590         ret = get_errno(umask(arg1));
8591         break;
8592     case TARGET_NR_chroot:
8593         if (!(p = lock_user_string(arg1)))
8594             goto efault;
8595         ret = get_errno(chroot(p));
8596         unlock_user(p, arg1, 0);
8597         break;
8598 #ifdef TARGET_NR_ustat
8599     case TARGET_NR_ustat:
8600         goto unimplemented;
8601 #endif
8602 #ifdef TARGET_NR_dup2
8603     case TARGET_NR_dup2:
8604         ret = get_errno(dup2(arg1, arg2));
8605         if (ret >= 0) {
8606             fd_trans_dup(arg1, arg2);
8607         }
8608         break;
8609 #endif
8610 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8611     case TARGET_NR_dup3:
8612     {
8613         int host_flags;
8614 
8615         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8616             return -EINVAL;
8617         }
8618         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8619         ret = get_errno(dup3(arg1, arg2, host_flags));
8620         if (ret >= 0) {
8621             fd_trans_dup(arg1, arg2);
8622         }
8623         break;
8624     }
8625 #endif
8626 #ifdef TARGET_NR_getppid /* not on alpha */
8627     case TARGET_NR_getppid:
8628         ret = get_errno(getppid());
8629         break;
8630 #endif
8631 #ifdef TARGET_NR_getpgrp
8632     case TARGET_NR_getpgrp:
8633         ret = get_errno(getpgrp());
8634         break;
8635 #endif
8636     case TARGET_NR_setsid:
8637         ret = get_errno(setsid());
8638         break;
8639 #ifdef TARGET_NR_sigaction
8640     case TARGET_NR_sigaction:
8641         {
8642 #if defined(TARGET_ALPHA)
8643             struct target_sigaction act, oact, *pact = 0;
8644             struct target_old_sigaction *old_act;
8645             if (arg2) {
8646                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8647                     goto efault;
8648                 act._sa_handler = old_act->_sa_handler;
8649                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8650                 act.sa_flags = old_act->sa_flags;
8651                 act.sa_restorer = 0;
8652                 unlock_user_struct(old_act, arg2, 0);
8653                 pact = &act;
8654             }
8655             ret = get_errno(do_sigaction(arg1, pact, &oact));
8656             if (!is_error(ret) && arg3) {
8657                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8658                     goto efault;
8659                 old_act->_sa_handler = oact._sa_handler;
8660                 old_act->sa_mask = oact.sa_mask.sig[0];
8661                 old_act->sa_flags = oact.sa_flags;
8662                 unlock_user_struct(old_act, arg3, 1);
8663             }
8664 #elif defined(TARGET_MIPS)
8665 	    struct target_sigaction act, oact, *pact, *old_act;
8666 
8667 	    if (arg2) {
8668                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8669                     goto efault;
8670 		act._sa_handler = old_act->_sa_handler;
8671 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8672 		act.sa_flags = old_act->sa_flags;
8673 		unlock_user_struct(old_act, arg2, 0);
8674 		pact = &act;
8675 	    } else {
8676 		pact = NULL;
8677 	    }
8678 
8679 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8680 
8681 	    if (!is_error(ret) && arg3) {
8682                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8683                     goto efault;
8684 		old_act->_sa_handler = oact._sa_handler;
8685 		old_act->sa_flags = oact.sa_flags;
8686 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8687 		old_act->sa_mask.sig[1] = 0;
8688 		old_act->sa_mask.sig[2] = 0;
8689 		old_act->sa_mask.sig[3] = 0;
8690 		unlock_user_struct(old_act, arg3, 1);
8691 	    }
8692 #else
8693             struct target_old_sigaction *old_act;
8694             struct target_sigaction act, oact, *pact;
8695             if (arg2) {
8696                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8697                     goto efault;
8698                 act._sa_handler = old_act->_sa_handler;
8699                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8700                 act.sa_flags = old_act->sa_flags;
8701                 act.sa_restorer = old_act->sa_restorer;
8702 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8703                 act.ka_restorer = 0;
8704 #endif
8705                 unlock_user_struct(old_act, arg2, 0);
8706                 pact = &act;
8707             } else {
8708                 pact = NULL;
8709             }
8710             ret = get_errno(do_sigaction(arg1, pact, &oact));
8711             if (!is_error(ret) && arg3) {
8712                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8713                     goto efault;
8714                 old_act->_sa_handler = oact._sa_handler;
8715                 old_act->sa_mask = oact.sa_mask.sig[0];
8716                 old_act->sa_flags = oact.sa_flags;
8717                 old_act->sa_restorer = oact.sa_restorer;
8718                 unlock_user_struct(old_act, arg3, 1);
8719             }
8720 #endif
8721         }
8722         break;
8723 #endif
8724     case TARGET_NR_rt_sigaction:
8725         {
8726 #if defined(TARGET_ALPHA)
8727             /* For Alpha and SPARC this is a 5 argument syscall, with
8728              * a 'restorer' parameter which must be copied into the
8729              * sa_restorer field of the sigaction struct.
8730              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8731              * and arg5 is the sigsetsize.
8732              * Alpha also has a separate rt_sigaction struct that it uses
8733              * here; SPARC uses the usual sigaction struct.
8734              */
8735             struct target_rt_sigaction *rt_act;
8736             struct target_sigaction act, oact, *pact = 0;
8737 
8738             if (arg4 != sizeof(target_sigset_t)) {
8739                 ret = -TARGET_EINVAL;
8740                 break;
8741             }
8742             if (arg2) {
8743                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8744                     goto efault;
8745                 act._sa_handler = rt_act->_sa_handler;
8746                 act.sa_mask = rt_act->sa_mask;
8747                 act.sa_flags = rt_act->sa_flags;
8748                 act.sa_restorer = arg5;
8749                 unlock_user_struct(rt_act, arg2, 0);
8750                 pact = &act;
8751             }
8752             ret = get_errno(do_sigaction(arg1, pact, &oact));
8753             if (!is_error(ret) && arg3) {
8754                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8755                     goto efault;
8756                 rt_act->_sa_handler = oact._sa_handler;
8757                 rt_act->sa_mask = oact.sa_mask;
8758                 rt_act->sa_flags = oact.sa_flags;
8759                 unlock_user_struct(rt_act, arg3, 1);
8760             }
8761 #else
8762 #ifdef TARGET_SPARC
8763             target_ulong restorer = arg4;
8764             target_ulong sigsetsize = arg5;
8765 #else
8766             target_ulong sigsetsize = arg4;
8767 #endif
8768             struct target_sigaction *act;
8769             struct target_sigaction *oact;
8770 
8771             if (sigsetsize != sizeof(target_sigset_t)) {
8772                 ret = -TARGET_EINVAL;
8773                 break;
8774             }
8775             if (arg2) {
8776                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8777                     goto efault;
8778                 }
8779 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8780                 act->ka_restorer = restorer;
8781 #endif
8782             } else {
8783                 act = NULL;
8784             }
8785             if (arg3) {
8786                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8787                     ret = -TARGET_EFAULT;
8788                     goto rt_sigaction_fail;
8789                 }
8790             } else
8791                 oact = NULL;
8792             ret = get_errno(do_sigaction(arg1, act, oact));
8793 	rt_sigaction_fail:
8794             if (act)
8795                 unlock_user_struct(act, arg2, 0);
8796             if (oact)
8797                 unlock_user_struct(oact, arg3, 1);
8798 #endif
8799         }
8800         break;
8801 #ifdef TARGET_NR_sgetmask /* not on alpha */
8802     case TARGET_NR_sgetmask:
8803         {
8804             sigset_t cur_set;
8805             abi_ulong target_set;
8806             ret = do_sigprocmask(0, NULL, &cur_set);
8807             if (!ret) {
8808                 host_to_target_old_sigset(&target_set, &cur_set);
8809                 ret = target_set;
8810             }
8811         }
8812         break;
8813 #endif
8814 #ifdef TARGET_NR_ssetmask /* not on alpha */
8815     case TARGET_NR_ssetmask:
8816         {
8817             sigset_t set, oset;
8818             abi_ulong target_set = arg1;
8819             target_to_host_old_sigset(&set, &target_set);
8820             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8821             if (!ret) {
8822                 host_to_target_old_sigset(&target_set, &oset);
8823                 ret = target_set;
8824             }
8825         }
8826         break;
8827 #endif
8828 #ifdef TARGET_NR_sigprocmask
8829     case TARGET_NR_sigprocmask:
8830         {
8831 #if defined(TARGET_ALPHA)
8832             sigset_t set, oldset;
8833             abi_ulong mask;
8834             int how;
8835 
8836             switch (arg1) {
8837             case TARGET_SIG_BLOCK:
8838                 how = SIG_BLOCK;
8839                 break;
8840             case TARGET_SIG_UNBLOCK:
8841                 how = SIG_UNBLOCK;
8842                 break;
8843             case TARGET_SIG_SETMASK:
8844                 how = SIG_SETMASK;
8845                 break;
8846             default:
8847                 ret = -TARGET_EINVAL;
8848                 goto fail;
8849             }
8850             mask = arg2;
8851             target_to_host_old_sigset(&set, &mask);
8852 
8853             ret = do_sigprocmask(how, &set, &oldset);
8854             if (!is_error(ret)) {
8855                 host_to_target_old_sigset(&mask, &oldset);
8856                 ret = mask;
8857                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8858             }
8859 #else
8860             sigset_t set, oldset, *set_ptr;
8861             int how;
8862 
8863             if (arg2) {
8864                 switch (arg1) {
8865                 case TARGET_SIG_BLOCK:
8866                     how = SIG_BLOCK;
8867                     break;
8868                 case TARGET_SIG_UNBLOCK:
8869                     how = SIG_UNBLOCK;
8870                     break;
8871                 case TARGET_SIG_SETMASK:
8872                     how = SIG_SETMASK;
8873                     break;
8874                 default:
8875                     ret = -TARGET_EINVAL;
8876                     goto fail;
8877                 }
8878                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8879                     goto efault;
8880                 target_to_host_old_sigset(&set, p);
8881                 unlock_user(p, arg2, 0);
8882                 set_ptr = &set;
8883             } else {
8884                 how = 0;
8885                 set_ptr = NULL;
8886             }
8887             ret = do_sigprocmask(how, set_ptr, &oldset);
8888             if (!is_error(ret) && arg3) {
8889                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8890                     goto efault;
8891                 host_to_target_old_sigset(p, &oldset);
8892                 unlock_user(p, arg3, sizeof(target_sigset_t));
8893             }
8894 #endif
8895         }
8896         break;
8897 #endif
8898     case TARGET_NR_rt_sigprocmask:
8899         {
8900             int how = arg1;
8901             sigset_t set, oldset, *set_ptr;
8902 
8903             if (arg4 != sizeof(target_sigset_t)) {
8904                 ret = -TARGET_EINVAL;
8905                 break;
8906             }
8907 
8908             if (arg2) {
8909                 switch(how) {
8910                 case TARGET_SIG_BLOCK:
8911                     how = SIG_BLOCK;
8912                     break;
8913                 case TARGET_SIG_UNBLOCK:
8914                     how = SIG_UNBLOCK;
8915                     break;
8916                 case TARGET_SIG_SETMASK:
8917                     how = SIG_SETMASK;
8918                     break;
8919                 default:
8920                     ret = -TARGET_EINVAL;
8921                     goto fail;
8922                 }
8923                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8924                     goto efault;
8925                 target_to_host_sigset(&set, p);
8926                 unlock_user(p, arg2, 0);
8927                 set_ptr = &set;
8928             } else {
8929                 how = 0;
8930                 set_ptr = NULL;
8931             }
8932             ret = do_sigprocmask(how, set_ptr, &oldset);
8933             if (!is_error(ret) && arg3) {
8934                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8935                     goto efault;
8936                 host_to_target_sigset(p, &oldset);
8937                 unlock_user(p, arg3, sizeof(target_sigset_t));
8938             }
8939         }
8940         break;
8941 #ifdef TARGET_NR_sigpending
8942     case TARGET_NR_sigpending:
8943         {
8944             sigset_t set;
8945             ret = get_errno(sigpending(&set));
8946             if (!is_error(ret)) {
8947                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8948                     goto efault;
8949                 host_to_target_old_sigset(p, &set);
8950                 unlock_user(p, arg1, sizeof(target_sigset_t));
8951             }
8952         }
8953         break;
8954 #endif
8955     case TARGET_NR_rt_sigpending:
8956         {
8957             sigset_t set;
8958 
8959             /* Yes, this check is >, not != like most. We follow the kernel's
8960              * logic and it does it like this because it implements
8961              * NR_sigpending through the same code path, and in that case
8962              * the old_sigset_t is smaller in size.
8963              */
8964             if (arg2 > sizeof(target_sigset_t)) {
8965                 ret = -TARGET_EINVAL;
8966                 break;
8967             }
8968 
8969             ret = get_errno(sigpending(&set));
8970             if (!is_error(ret)) {
8971                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8972                     goto efault;
8973                 host_to_target_sigset(p, &set);
8974                 unlock_user(p, arg1, sizeof(target_sigset_t));
8975             }
8976         }
8977         break;
8978 #ifdef TARGET_NR_sigsuspend
8979     case TARGET_NR_sigsuspend:
8980         {
8981             TaskState *ts = cpu->opaque;
8982 #if defined(TARGET_ALPHA)
8983             abi_ulong mask = arg1;
8984             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8985 #else
8986             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8987                 goto efault;
8988             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8989             unlock_user(p, arg1, 0);
8990 #endif
8991             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8992                                                SIGSET_T_SIZE));
8993             if (ret != -TARGET_ERESTARTSYS) {
8994                 ts->in_sigsuspend = 1;
8995             }
8996         }
8997         break;
8998 #endif
8999     case TARGET_NR_rt_sigsuspend:
9000         {
9001             TaskState *ts = cpu->opaque;
9002 
9003             if (arg2 != sizeof(target_sigset_t)) {
9004                 ret = -TARGET_EINVAL;
9005                 break;
9006             }
9007             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9008                 goto efault;
9009             target_to_host_sigset(&ts->sigsuspend_mask, p);
9010             unlock_user(p, arg1, 0);
9011             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9012                                                SIGSET_T_SIZE));
9013             if (ret != -TARGET_ERESTARTSYS) {
9014                 ts->in_sigsuspend = 1;
9015             }
9016         }
9017         break;
9018     case TARGET_NR_rt_sigtimedwait:
9019         {
9020             sigset_t set;
9021             struct timespec uts, *puts;
9022             siginfo_t uinfo;
9023 
9024             if (arg4 != sizeof(target_sigset_t)) {
9025                 ret = -TARGET_EINVAL;
9026                 break;
9027             }
9028 
9029             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9030                 goto efault;
9031             target_to_host_sigset(&set, p);
9032             unlock_user(p, arg1, 0);
9033             if (arg3) {
9034                 puts = &uts;
9035                 target_to_host_timespec(puts, arg3);
9036             } else {
9037                 puts = NULL;
9038             }
9039             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9040                                                  SIGSET_T_SIZE));
9041             if (!is_error(ret)) {
9042                 if (arg2) {
9043                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9044                                   0);
9045                     if (!p) {
9046                         goto efault;
9047                     }
9048                     host_to_target_siginfo(p, &uinfo);
9049                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9050                 }
9051                 ret = host_to_target_signal(ret);
9052             }
9053         }
9054         break;
9055     case TARGET_NR_rt_sigqueueinfo:
9056         {
9057             siginfo_t uinfo;
9058 
9059             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9060             if (!p) {
9061                 goto efault;
9062             }
9063             target_to_host_siginfo(&uinfo, p);
9064             unlock_user(p, arg3, 0);
9065             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9066         }
9067         break;
9068     case TARGET_NR_rt_tgsigqueueinfo:
9069         {
9070             siginfo_t uinfo;
9071 
9072             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9073             if (!p) {
9074                 goto efault;
9075             }
9076             target_to_host_siginfo(&uinfo, p);
9077             unlock_user(p, arg4, 0);
9078             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9079         }
9080         break;
9081 #ifdef TARGET_NR_sigreturn
9082     case TARGET_NR_sigreturn:
9083         if (block_signals()) {
9084             ret = -TARGET_ERESTARTSYS;
9085         } else {
9086             ret = do_sigreturn(cpu_env);
9087         }
9088         break;
9089 #endif
9090     case TARGET_NR_rt_sigreturn:
9091         if (block_signals()) {
9092             ret = -TARGET_ERESTARTSYS;
9093         } else {
9094             ret = do_rt_sigreturn(cpu_env);
9095         }
9096         break;
9097     case TARGET_NR_sethostname:
9098         if (!(p = lock_user_string(arg1)))
9099             goto efault;
9100         ret = get_errno(sethostname(p, arg2));
9101         unlock_user(p, arg1, 0);
9102         break;
9103     case TARGET_NR_setrlimit:
9104         {
9105             int resource = target_to_host_resource(arg1);
9106             struct target_rlimit *target_rlim;
9107             struct rlimit rlim;
9108             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9109                 goto efault;
9110             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9111             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9112             unlock_user_struct(target_rlim, arg2, 0);
9113             ret = get_errno(setrlimit(resource, &rlim));
9114         }
9115         break;
9116     case TARGET_NR_getrlimit:
9117         {
9118             int resource = target_to_host_resource(arg1);
9119             struct target_rlimit *target_rlim;
9120             struct rlimit rlim;
9121 
9122             ret = get_errno(getrlimit(resource, &rlim));
9123             if (!is_error(ret)) {
9124                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9125                     goto efault;
9126                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9127                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9128                 unlock_user_struct(target_rlim, arg2, 1);
9129             }
9130         }
9131         break;
9132     case TARGET_NR_getrusage:
9133         {
9134             struct rusage rusage;
9135             ret = get_errno(getrusage(arg1, &rusage));
9136             if (!is_error(ret)) {
9137                 ret = host_to_target_rusage(arg2, &rusage);
9138             }
9139         }
9140         break;
9141     case TARGET_NR_gettimeofday:
9142         {
9143             struct timeval tv;
9144             ret = get_errno(gettimeofday(&tv, NULL));
9145             if (!is_error(ret)) {
9146                 if (copy_to_user_timeval(arg1, &tv))
9147                     goto efault;
9148             }
9149         }
9150         break;
9151     case TARGET_NR_settimeofday:
9152         {
9153             struct timeval tv, *ptv = NULL;
9154             struct timezone tz, *ptz = NULL;
9155 
9156             if (arg1) {
9157                 if (copy_from_user_timeval(&tv, arg1)) {
9158                     goto efault;
9159                 }
9160                 ptv = &tv;
9161             }
9162 
9163             if (arg2) {
9164                 if (copy_from_user_timezone(&tz, arg2)) {
9165                     goto efault;
9166                 }
9167                 ptz = &tz;
9168             }
9169 
9170             ret = get_errno(settimeofday(ptv, ptz));
9171         }
9172         break;
9173 #if defined(TARGET_NR_select)
9174     case TARGET_NR_select:
9175 #if defined(TARGET_WANT_NI_OLD_SELECT)
9176         /* some architectures used to have old_select here
9177          * but now ENOSYS it.
9178          */
9179         ret = -TARGET_ENOSYS;
9180 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9181         ret = do_old_select(arg1);
9182 #else
9183         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9184 #endif
9185         break;
9186 #endif
9187 #ifdef TARGET_NR_pselect6
9188     case TARGET_NR_pselect6:
9189         {
9190             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9191             fd_set rfds, wfds, efds;
9192             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9193             struct timespec ts, *ts_ptr;
9194 
9195             /*
9196              * The 6th arg is actually two args smashed together,
9197              * so we cannot use the C library.
9198              */
9199             sigset_t set;
9200             struct {
9201                 sigset_t *set;
9202                 size_t size;
9203             } sig, *sig_ptr;
9204 
9205             abi_ulong arg_sigset, arg_sigsize, *arg7;
9206             target_sigset_t *target_sigset;
9207 
9208             n = arg1;
9209             rfd_addr = arg2;
9210             wfd_addr = arg3;
9211             efd_addr = arg4;
9212             ts_addr = arg5;
9213 
9214             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9215             if (ret) {
9216                 goto fail;
9217             }
9218             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9219             if (ret) {
9220                 goto fail;
9221             }
9222             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9223             if (ret) {
9224                 goto fail;
9225             }
9226 
9227             /*
9228              * This takes a timespec, and not a timeval, so we cannot
9229              * use the do_select() helper ...
9230              */
9231             if (ts_addr) {
9232                 if (target_to_host_timespec(&ts, ts_addr)) {
9233                     goto efault;
9234                 }
9235                 ts_ptr = &ts;
9236             } else {
9237                 ts_ptr = NULL;
9238             }
9239 
9240             /* Extract the two packed args for the sigset */
9241             if (arg6) {
9242                 sig_ptr = &sig;
9243                 sig.size = SIGSET_T_SIZE;
9244 
9245                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9246                 if (!arg7) {
9247                     goto efault;
9248                 }
9249                 arg_sigset = tswapal(arg7[0]);
9250                 arg_sigsize = tswapal(arg7[1]);
9251                 unlock_user(arg7, arg6, 0);
9252 
9253                 if (arg_sigset) {
9254                     sig.set = &set;
9255                     if (arg_sigsize != sizeof(*target_sigset)) {
9256                         /* Like the kernel, we enforce correct size sigsets */
9257                         ret = -TARGET_EINVAL;
9258                         goto fail;
9259                     }
9260                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9261                                               sizeof(*target_sigset), 1);
9262                     if (!target_sigset) {
9263                         goto efault;
9264                     }
9265                     target_to_host_sigset(&set, target_sigset);
9266                     unlock_user(target_sigset, arg_sigset, 0);
9267                 } else {
9268                     sig.set = NULL;
9269                 }
9270             } else {
9271                 sig_ptr = NULL;
9272             }
9273 
9274             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9275                                           ts_ptr, sig_ptr));
9276 
9277             if (!is_error(ret)) {
9278                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9279                     goto efault;
9280                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9281                     goto efault;
9282                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9283                     goto efault;
9284 
9285                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9286                     goto efault;
9287             }
9288         }
9289         break;
9290 #endif
9291 #ifdef TARGET_NR_symlink
9292     case TARGET_NR_symlink:
9293         {
9294             void *p2;
9295             p = lock_user_string(arg1);
9296             p2 = lock_user_string(arg2);
9297             if (!p || !p2)
9298                 ret = -TARGET_EFAULT;
9299             else
9300                 ret = get_errno(symlink(p, p2));
9301             unlock_user(p2, arg2, 0);
9302             unlock_user(p, arg1, 0);
9303         }
9304         break;
9305 #endif
9306 #if defined(TARGET_NR_symlinkat)
9307     case TARGET_NR_symlinkat:
9308         {
9309             void *p2;
9310             p  = lock_user_string(arg1);
9311             p2 = lock_user_string(arg3);
9312             if (!p || !p2)
9313                 ret = -TARGET_EFAULT;
9314             else
9315                 ret = get_errno(symlinkat(p, arg2, p2));
9316             unlock_user(p2, arg3, 0);
9317             unlock_user(p, arg1, 0);
9318         }
9319         break;
9320 #endif
9321 #ifdef TARGET_NR_oldlstat
9322     case TARGET_NR_oldlstat:
9323         goto unimplemented;
9324 #endif
9325 #ifdef TARGET_NR_readlink
9326     case TARGET_NR_readlink:
9327         {
9328             void *p2;
9329             p = lock_user_string(arg1);
9330             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9331             if (!p || !p2) {
9332                 ret = -TARGET_EFAULT;
9333             } else if (!arg3) {
9334                 /* Short circuit this for the magic exe check. */
9335                 ret = -TARGET_EINVAL;
9336             } else if (is_proc_myself((const char *)p, "exe")) {
9337                 char real[PATH_MAX], *temp;
9338                 temp = realpath(exec_path, real);
9339                 /* Return value is # of bytes that we wrote to the buffer. */
9340                 if (temp == NULL) {
9341                     ret = get_errno(-1);
9342                 } else {
9343                     /* Don't worry about sign mismatch as earlier mapping
9344                      * logic would have thrown a bad address error. */
9345                     ret = MIN(strlen(real), arg3);
9346                     /* We cannot NUL terminate the string. */
9347                     memcpy(p2, real, ret);
9348                 }
9349             } else {
9350                 ret = get_errno(readlink(path(p), p2, arg3));
9351             }
9352             unlock_user(p2, arg2, ret);
9353             unlock_user(p, arg1, 0);
9354         }
9355         break;
9356 #endif
9357 #if defined(TARGET_NR_readlinkat)
9358     case TARGET_NR_readlinkat:
9359         {
9360             void *p2;
9361             p  = lock_user_string(arg2);
9362             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9363             if (!p || !p2) {
9364                 ret = -TARGET_EFAULT;
9365             } else if (is_proc_myself((const char *)p, "exe")) {
9366                 char real[PATH_MAX], *temp;
9367                 temp = realpath(exec_path, real);
9368                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9369                 snprintf((char *)p2, arg4, "%s", real);
9370             } else {
9371                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9372             }
9373             unlock_user(p2, arg3, ret);
9374             unlock_user(p, arg2, 0);
9375         }
9376         break;
9377 #endif
9378 #ifdef TARGET_NR_uselib
9379     case TARGET_NR_uselib:
9380         goto unimplemented;
9381 #endif
9382 #ifdef TARGET_NR_swapon
9383     case TARGET_NR_swapon:
9384         if (!(p = lock_user_string(arg1)))
9385             goto efault;
9386         ret = get_errno(swapon(p, arg2));
9387         unlock_user(p, arg1, 0);
9388         break;
9389 #endif
9390     case TARGET_NR_reboot:
9391         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9392            /* arg4 must be ignored in all other cases */
9393            p = lock_user_string(arg4);
9394            if (!p) {
9395               goto efault;
9396            }
9397            ret = get_errno(reboot(arg1, arg2, arg3, p));
9398            unlock_user(p, arg4, 0);
9399         } else {
9400            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9401         }
9402         break;
9403 #ifdef TARGET_NR_readdir
9404     case TARGET_NR_readdir:
9405         goto unimplemented;
9406 #endif
9407 #ifdef TARGET_NR_mmap
9408     case TARGET_NR_mmap:
9409 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9410     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9411     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9412     || defined(TARGET_S390X)
9413         {
9414             abi_ulong *v;
9415             abi_ulong v1, v2, v3, v4, v5, v6;
9416             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9417                 goto efault;
9418             v1 = tswapal(v[0]);
9419             v2 = tswapal(v[1]);
9420             v3 = tswapal(v[2]);
9421             v4 = tswapal(v[3]);
9422             v5 = tswapal(v[4]);
9423             v6 = tswapal(v[5]);
9424             unlock_user(v, arg1, 0);
9425             ret = get_errno(target_mmap(v1, v2, v3,
9426                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9427                                         v5, v6));
9428         }
9429 #else
9430         ret = get_errno(target_mmap(arg1, arg2, arg3,
9431                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9432                                     arg5,
9433                                     arg6));
9434 #endif
9435         break;
9436 #endif
9437 #ifdef TARGET_NR_mmap2
9438     case TARGET_NR_mmap2:
9439 #ifndef MMAP_SHIFT
9440 #define MMAP_SHIFT 12
9441 #endif
9442         ret = get_errno(target_mmap(arg1, arg2, arg3,
9443                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9444                                     arg5,
9445                                     arg6 << MMAP_SHIFT));
9446         break;
9447 #endif
9448     case TARGET_NR_munmap:
9449         ret = get_errno(target_munmap(arg1, arg2));
9450         break;
9451     case TARGET_NR_mprotect:
9452         {
9453             TaskState *ts = cpu->opaque;
9454             /* Special hack to detect libc making the stack executable.  */
9455             if ((arg3 & PROT_GROWSDOWN)
9456                 && arg1 >= ts->info->stack_limit
9457                 && arg1 <= ts->info->start_stack) {
9458                 arg3 &= ~PROT_GROWSDOWN;
9459                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9460                 arg1 = ts->info->stack_limit;
9461             }
9462         }
9463         ret = get_errno(target_mprotect(arg1, arg2, arg3));
9464         break;
9465 #ifdef TARGET_NR_mremap
9466     case TARGET_NR_mremap:
9467         ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9468         break;
9469 #endif
9470         /* ??? msync/mlock/munlock are broken for softmmu.  */
9471 #ifdef TARGET_NR_msync
9472     case TARGET_NR_msync:
9473         ret = get_errno(msync(g2h(arg1), arg2, arg3));
9474         break;
9475 #endif
9476 #ifdef TARGET_NR_mlock
9477     case TARGET_NR_mlock:
9478         ret = get_errno(mlock(g2h(arg1), arg2));
9479         break;
9480 #endif
9481 #ifdef TARGET_NR_munlock
9482     case TARGET_NR_munlock:
9483         ret = get_errno(munlock(g2h(arg1), arg2));
9484         break;
9485 #endif
9486 #ifdef TARGET_NR_mlockall
9487     case TARGET_NR_mlockall:
9488         ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9489         break;
9490 #endif
9491 #ifdef TARGET_NR_munlockall
9492     case TARGET_NR_munlockall:
9493         ret = get_errno(munlockall());
9494         break;
9495 #endif
9496     case TARGET_NR_truncate:
9497         if (!(p = lock_user_string(arg1)))
9498             goto efault;
9499         ret = get_errno(truncate(p, arg2));
9500         unlock_user(p, arg1, 0);
9501         break;
9502     case TARGET_NR_ftruncate:
9503         ret = get_errno(ftruncate(arg1, arg2));
9504         break;
9505     case TARGET_NR_fchmod:
9506         ret = get_errno(fchmod(arg1, arg2));
9507         break;
9508 #if defined(TARGET_NR_fchmodat)
9509     case TARGET_NR_fchmodat:
9510         if (!(p = lock_user_string(arg2)))
9511             goto efault;
9512         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9513         unlock_user(p, arg2, 0);
9514         break;
9515 #endif
9516     case TARGET_NR_getpriority:
9517         /* Note that negative values are valid for getpriority, so we must
9518            differentiate based on errno settings.  */
9519         errno = 0;
9520         ret = getpriority(arg1, arg2);
9521         if (ret == -1 && errno != 0) {
9522             ret = -host_to_target_errno(errno);
9523             break;
9524         }
9525 #ifdef TARGET_ALPHA
9526         /* Return value is the unbiased priority.  Signal no error.  */
9527         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9528 #else
9529         /* Return value is a biased priority to avoid negative numbers.  */
9530         ret = 20 - ret;
9531 #endif
9532         break;
9533     case TARGET_NR_setpriority:
9534         ret = get_errno(setpriority(arg1, arg2, arg3));
9535         break;
9536 #ifdef TARGET_NR_profil
9537     case TARGET_NR_profil:
9538         goto unimplemented;
9539 #endif
9540     case TARGET_NR_statfs:
9541         if (!(p = lock_user_string(arg1)))
9542             goto efault;
9543         ret = get_errno(statfs(path(p), &stfs));
9544         unlock_user(p, arg1, 0);
9545     convert_statfs:
9546         if (!is_error(ret)) {
9547             struct target_statfs *target_stfs;
9548 
9549             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9550                 goto efault;
9551             __put_user(stfs.f_type, &target_stfs->f_type);
9552             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9553             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9554             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9555             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9556             __put_user(stfs.f_files, &target_stfs->f_files);
9557             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9558             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9559             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9560             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9561             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9562 #ifdef _STATFS_F_FLAGS
9563             __put_user(stfs.f_flags, &target_stfs->f_flags);
9564 #else
9565             __put_user(0, &target_stfs->f_flags);
9566 #endif
9567             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9568             unlock_user_struct(target_stfs, arg2, 1);
9569         }
9570         break;
9571     case TARGET_NR_fstatfs:
9572         ret = get_errno(fstatfs(arg1, &stfs));
9573         goto convert_statfs;
9574 #ifdef TARGET_NR_statfs64
9575     case TARGET_NR_statfs64:
9576         if (!(p = lock_user_string(arg1)))
9577             goto efault;
9578         ret = get_errno(statfs(path(p), &stfs));
9579         unlock_user(p, arg1, 0);
9580     convert_statfs64:
9581         if (!is_error(ret)) {
9582             struct target_statfs64 *target_stfs;
9583 
9584             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9585                 goto efault;
9586             __put_user(stfs.f_type, &target_stfs->f_type);
9587             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9588             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9589             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9590             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9591             __put_user(stfs.f_files, &target_stfs->f_files);
9592             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9593             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9594             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9595             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9596             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9597             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9598             unlock_user_struct(target_stfs, arg3, 1);
9599         }
9600         break;
9601     case TARGET_NR_fstatfs64:
9602         ret = get_errno(fstatfs(arg1, &stfs));
9603         goto convert_statfs64;
9604 #endif
9605 #ifdef TARGET_NR_ioperm
9606     case TARGET_NR_ioperm:
9607         goto unimplemented;
9608 #endif
9609 #ifdef TARGET_NR_socketcall
9610     case TARGET_NR_socketcall:
9611         ret = do_socketcall(arg1, arg2);
9612         break;
9613 #endif
9614 #ifdef TARGET_NR_accept
9615     case TARGET_NR_accept:
9616         ret = do_accept4(arg1, arg2, arg3, 0);
9617         break;
9618 #endif
9619 #ifdef TARGET_NR_accept4
9620     case TARGET_NR_accept4:
9621         ret = do_accept4(arg1, arg2, arg3, arg4);
9622         break;
9623 #endif
9624 #ifdef TARGET_NR_bind
9625     case TARGET_NR_bind:
9626         ret = do_bind(arg1, arg2, arg3);
9627         break;
9628 #endif
9629 #ifdef TARGET_NR_connect
9630     case TARGET_NR_connect:
9631         ret = do_connect(arg1, arg2, arg3);
9632         break;
9633 #endif
9634 #ifdef TARGET_NR_getpeername
9635     case TARGET_NR_getpeername:
9636         ret = do_getpeername(arg1, arg2, arg3);
9637         break;
9638 #endif
9639 #ifdef TARGET_NR_getsockname
9640     case TARGET_NR_getsockname:
9641         ret = do_getsockname(arg1, arg2, arg3);
9642         break;
9643 #endif
9644 #ifdef TARGET_NR_getsockopt
9645     case TARGET_NR_getsockopt:
9646         ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9647         break;
9648 #endif
9649 #ifdef TARGET_NR_listen
9650     case TARGET_NR_listen:
9651         ret = get_errno(listen(arg1, arg2));
9652         break;
9653 #endif
9654 #ifdef TARGET_NR_recv
9655     case TARGET_NR_recv:
9656         ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9657         break;
9658 #endif
9659 #ifdef TARGET_NR_recvfrom
9660     case TARGET_NR_recvfrom:
9661         ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9662         break;
9663 #endif
9664 #ifdef TARGET_NR_recvmsg
9665     case TARGET_NR_recvmsg:
9666         ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9667         break;
9668 #endif
9669 #ifdef TARGET_NR_send
9670     case TARGET_NR_send:
9671         ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9672         break;
9673 #endif
9674 #ifdef TARGET_NR_sendmsg
9675     case TARGET_NR_sendmsg:
9676         ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9677         break;
9678 #endif
9679 #ifdef TARGET_NR_sendmmsg
9680     case TARGET_NR_sendmmsg:
9681         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9682         break;
9683     case TARGET_NR_recvmmsg:
9684         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9685         break;
9686 #endif
9687 #ifdef TARGET_NR_sendto
9688     case TARGET_NR_sendto:
9689         ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9690         break;
9691 #endif
9692 #ifdef TARGET_NR_shutdown
9693     case TARGET_NR_shutdown:
9694         ret = get_errno(shutdown(arg1, arg2));
9695         break;
9696 #endif
9697 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9698     case TARGET_NR_getrandom:
9699         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9700         if (!p) {
9701             goto efault;
9702         }
9703         ret = get_errno(getrandom(p, arg2, arg3));
9704         unlock_user(p, arg1, ret);
9705         break;
9706 #endif
9707 #ifdef TARGET_NR_socket
9708     case TARGET_NR_socket:
9709         ret = do_socket(arg1, arg2, arg3);
9710         break;
9711 #endif
9712 #ifdef TARGET_NR_socketpair
9713     case TARGET_NR_socketpair:
9714         ret = do_socketpair(arg1, arg2, arg3, arg4);
9715         break;
9716 #endif
9717 #ifdef TARGET_NR_setsockopt
9718     case TARGET_NR_setsockopt:
9719         ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9720         break;
9721 #endif
9722 #if defined(TARGET_NR_syslog)
9723     case TARGET_NR_syslog:
9724         {
9725             int len = arg2;
9726 
9727             switch (arg1) {
9728             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9729             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9730             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9731             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9732             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9733             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9734             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9735             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9736                 {
9737                     ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9738                 }
9739                 break;
9740             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9741             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9742             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9743                 {
9744                     ret = -TARGET_EINVAL;
9745                     if (len < 0) {
9746                         goto fail;
9747                     }
9748                     ret = 0;
9749                     if (len == 0) {
9750                         break;
9751                     }
9752                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9753                     if (!p) {
9754                         ret = -TARGET_EFAULT;
9755                         goto fail;
9756                     }
9757                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9758                     unlock_user(p, arg2, arg3);
9759                 }
9760                 break;
9761             default:
9762                 ret = -EINVAL;
9763                 break;
9764             }
9765         }
9766         break;
9767 #endif
9768     case TARGET_NR_setitimer:
9769         {
9770             struct itimerval value, ovalue, *pvalue;
9771 
9772             if (arg2) {
9773                 pvalue = &value;
9774                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9775                     || copy_from_user_timeval(&pvalue->it_value,
9776                                               arg2 + sizeof(struct target_timeval)))
9777                     goto efault;
9778             } else {
9779                 pvalue = NULL;
9780             }
9781             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9782             if (!is_error(ret) && arg3) {
9783                 if (copy_to_user_timeval(arg3,
9784                                          &ovalue.it_interval)
9785                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9786                                             &ovalue.it_value))
9787                     goto efault;
9788             }
9789         }
9790         break;
9791     case TARGET_NR_getitimer:
9792         {
9793             struct itimerval value;
9794 
9795             ret = get_errno(getitimer(arg1, &value));
9796             if (!is_error(ret) && arg2) {
9797                 if (copy_to_user_timeval(arg2,
9798                                          &value.it_interval)
9799                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9800                                             &value.it_value))
9801                     goto efault;
9802             }
9803         }
9804         break;
9805 #ifdef TARGET_NR_stat
9806     case TARGET_NR_stat:
9807         if (!(p = lock_user_string(arg1)))
9808             goto efault;
9809         ret = get_errno(stat(path(p), &st));
9810         unlock_user(p, arg1, 0);
9811         goto do_stat;
9812 #endif
9813 #ifdef TARGET_NR_lstat
9814     case TARGET_NR_lstat:
9815         if (!(p = lock_user_string(arg1)))
9816             goto efault;
9817         ret = get_errno(lstat(path(p), &st));
9818         unlock_user(p, arg1, 0);
9819         goto do_stat;
9820 #endif
9821     case TARGET_NR_fstat:
9822         {
9823             ret = get_errno(fstat(arg1, &st));
9824 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9825         do_stat:
9826 #endif
9827             if (!is_error(ret)) {
9828                 struct target_stat *target_st;
9829 
9830                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9831                     goto efault;
9832                 memset(target_st, 0, sizeof(*target_st));
9833                 __put_user(st.st_dev, &target_st->st_dev);
9834                 __put_user(st.st_ino, &target_st->st_ino);
9835                 __put_user(st.st_mode, &target_st->st_mode);
9836                 __put_user(st.st_uid, &target_st->st_uid);
9837                 __put_user(st.st_gid, &target_st->st_gid);
9838                 __put_user(st.st_nlink, &target_st->st_nlink);
9839                 __put_user(st.st_rdev, &target_st->st_rdev);
9840                 __put_user(st.st_size, &target_st->st_size);
9841                 __put_user(st.st_blksize, &target_st->st_blksize);
9842                 __put_user(st.st_blocks, &target_st->st_blocks);
9843                 __put_user(st.st_atime, &target_st->target_st_atime);
9844                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9845                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9846                 unlock_user_struct(target_st, arg2, 1);
9847             }
9848         }
9849         break;
9850 #ifdef TARGET_NR_olduname
9851     case TARGET_NR_olduname:
9852         goto unimplemented;
9853 #endif
9854 #ifdef TARGET_NR_iopl
9855     case TARGET_NR_iopl:
9856         goto unimplemented;
9857 #endif
9858     case TARGET_NR_vhangup:
9859         ret = get_errno(vhangup());
9860         break;
9861 #ifdef TARGET_NR_idle
9862     case TARGET_NR_idle:
9863         goto unimplemented;
9864 #endif
9865 #ifdef TARGET_NR_syscall
9866     case TARGET_NR_syscall:
9867         ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9868                          arg6, arg7, arg8, 0);
9869         break;
9870 #endif
9871     case TARGET_NR_wait4:
9872         {
9873             int status;
9874             abi_long status_ptr = arg2;
9875             struct rusage rusage, *rusage_ptr;
9876             abi_ulong target_rusage = arg4;
9877             abi_long rusage_err;
9878             if (target_rusage)
9879                 rusage_ptr = &rusage;
9880             else
9881                 rusage_ptr = NULL;
9882             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9883             if (!is_error(ret)) {
9884                 if (status_ptr && ret) {
9885                     status = host_to_target_waitstatus(status);
9886                     if (put_user_s32(status, status_ptr))
9887                         goto efault;
9888                 }
9889                 if (target_rusage) {
9890                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9891                     if (rusage_err) {
9892                         ret = rusage_err;
9893                     }
9894                 }
9895             }
9896         }
9897         break;
9898 #ifdef TARGET_NR_swapoff
9899     case TARGET_NR_swapoff:
9900         if (!(p = lock_user_string(arg1)))
9901             goto efault;
9902         ret = get_errno(swapoff(p));
9903         unlock_user(p, arg1, 0);
9904         break;
9905 #endif
9906     case TARGET_NR_sysinfo:
9907         {
9908             struct target_sysinfo *target_value;
9909             struct sysinfo value;
9910             ret = get_errno(sysinfo(&value));
9911             if (!is_error(ret) && arg1)
9912             {
9913                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9914                     goto efault;
9915                 __put_user(value.uptime, &target_value->uptime);
9916                 __put_user(value.loads[0], &target_value->loads[0]);
9917                 __put_user(value.loads[1], &target_value->loads[1]);
9918                 __put_user(value.loads[2], &target_value->loads[2]);
9919                 __put_user(value.totalram, &target_value->totalram);
9920                 __put_user(value.freeram, &target_value->freeram);
9921                 __put_user(value.sharedram, &target_value->sharedram);
9922                 __put_user(value.bufferram, &target_value->bufferram);
9923                 __put_user(value.totalswap, &target_value->totalswap);
9924                 __put_user(value.freeswap, &target_value->freeswap);
9925                 __put_user(value.procs, &target_value->procs);
9926                 __put_user(value.totalhigh, &target_value->totalhigh);
9927                 __put_user(value.freehigh, &target_value->freehigh);
9928                 __put_user(value.mem_unit, &target_value->mem_unit);
9929                 unlock_user_struct(target_value, arg1, 1);
9930             }
9931         }
9932         break;
9933 #ifdef TARGET_NR_ipc
9934     case TARGET_NR_ipc:
9935         ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9936         break;
9937 #endif
9938 #ifdef TARGET_NR_semget
9939     case TARGET_NR_semget:
9940         ret = get_errno(semget(arg1, arg2, arg3));
9941         break;
9942 #endif
9943 #ifdef TARGET_NR_semop
9944     case TARGET_NR_semop:
9945         ret = do_semop(arg1, arg2, arg3);
9946         break;
9947 #endif
9948 #ifdef TARGET_NR_semctl
9949     case TARGET_NR_semctl:
9950         ret = do_semctl(arg1, arg2, arg3, arg4);
9951         break;
9952 #endif
9953 #ifdef TARGET_NR_msgctl
9954     case TARGET_NR_msgctl:
9955         ret = do_msgctl(arg1, arg2, arg3);
9956         break;
9957 #endif
9958 #ifdef TARGET_NR_msgget
9959     case TARGET_NR_msgget:
9960         ret = get_errno(msgget(arg1, arg2));
9961         break;
9962 #endif
9963 #ifdef TARGET_NR_msgrcv
9964     case TARGET_NR_msgrcv:
9965         ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9966         break;
9967 #endif
9968 #ifdef TARGET_NR_msgsnd
9969     case TARGET_NR_msgsnd:
9970         ret = do_msgsnd(arg1, arg2, arg3, arg4);
9971         break;
9972 #endif
9973 #ifdef TARGET_NR_shmget
9974     case TARGET_NR_shmget:
9975         ret = get_errno(shmget(arg1, arg2, arg3));
9976         break;
9977 #endif
9978 #ifdef TARGET_NR_shmctl
9979     case TARGET_NR_shmctl:
9980         ret = do_shmctl(arg1, arg2, arg3);
9981         break;
9982 #endif
9983 #ifdef TARGET_NR_shmat
9984     case TARGET_NR_shmat:
9985         ret = do_shmat(cpu_env, arg1, arg2, arg3);
9986         break;
9987 #endif
9988 #ifdef TARGET_NR_shmdt
9989     case TARGET_NR_shmdt:
9990         ret = do_shmdt(arg1);
9991         break;
9992 #endif
9993     case TARGET_NR_fsync:
9994         ret = get_errno(fsync(arg1));
9995         break;
9996     case TARGET_NR_clone:
9997         /* Linux manages to have three different orderings for its
9998          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9999          * match the kernel's CONFIG_CLONE_* settings.
10000          * Microblaze is further special in that it uses a sixth
10001          * implicit argument to clone for the TLS pointer.
10002          */
10003 #if defined(TARGET_MICROBLAZE)
10004         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10005 #elif defined(TARGET_CLONE_BACKWARDS)
10006         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10007 #elif defined(TARGET_CLONE_BACKWARDS2)
10008         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10009 #else
10010         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10011 #endif
10012         break;
10013 #ifdef __NR_exit_group
10014         /* new thread calls */
10015     case TARGET_NR_exit_group:
10016 #ifdef TARGET_GPROF
10017         _mcleanup();
10018 #endif
10019         gdb_exit(cpu_env, arg1);
10020         ret = get_errno(exit_group(arg1));
10021         break;
10022 #endif
10023     case TARGET_NR_setdomainname:
10024         if (!(p = lock_user_string(arg1)))
10025             goto efault;
10026         ret = get_errno(setdomainname(p, arg2));
10027         unlock_user(p, arg1, 0);
10028         break;
10029     case TARGET_NR_uname:
10030         /* no need to transcode because we use the linux syscall */
10031         {
10032             struct new_utsname * buf;
10033 
10034             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10035                 goto efault;
10036             ret = get_errno(sys_uname(buf));
10037             if (!is_error(ret)) {
10038                 /* Overwrite the native machine name with whatever is being
10039                    emulated. */
10040                 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
10041                 /* Allow the user to override the reported release.  */
10042                 if (qemu_uname_release && *qemu_uname_release) {
10043                     g_strlcpy(buf->release, qemu_uname_release,
10044                               sizeof(buf->release));
10045                 }
10046             }
10047             unlock_user_struct(buf, arg1, 1);
10048         }
10049         break;
10050 #ifdef TARGET_I386
10051     case TARGET_NR_modify_ldt:
10052         ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10053         break;
10054 #if !defined(TARGET_X86_64)
10055     case TARGET_NR_vm86old:
10056         goto unimplemented;
10057     case TARGET_NR_vm86:
10058         ret = do_vm86(cpu_env, arg1, arg2);
10059         break;
10060 #endif
10061 #endif
10062     case TARGET_NR_adjtimex:
10063         {
10064             struct timex host_buf;
10065 
10066             if (target_to_host_timex(&host_buf, arg1) != 0) {
10067                 goto efault;
10068             }
10069             ret = get_errno(adjtimex(&host_buf));
10070             if (!is_error(ret)) {
10071                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10072                     goto efault;
10073                 }
10074             }
10075         }
10076         break;
10077 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10078     case TARGET_NR_clock_adjtime:
10079         {
10080             struct timex htx, *phtx = &htx;
10081 
10082             if (target_to_host_timex(phtx, arg2) != 0) {
10083                 goto efault;
10084             }
10085             ret = get_errno(clock_adjtime(arg1, phtx));
10086             if (!is_error(ret) && phtx) {
10087                 if (host_to_target_timex(arg2, phtx) != 0) {
10088                     goto efault;
10089                 }
10090             }
10091         }
10092         break;
10093 #endif
10094 #ifdef TARGET_NR_create_module
10095     case TARGET_NR_create_module:
10096 #endif
10097     case TARGET_NR_init_module:
10098     case TARGET_NR_delete_module:
10099 #ifdef TARGET_NR_get_kernel_syms
10100     case TARGET_NR_get_kernel_syms:
10101 #endif
10102         goto unimplemented;
10103     case TARGET_NR_quotactl:
10104         goto unimplemented;
10105     case TARGET_NR_getpgid:
10106         ret = get_errno(getpgid(arg1));
10107         break;
10108     case TARGET_NR_fchdir:
10109         ret = get_errno(fchdir(arg1));
10110         break;
10111 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10112     case TARGET_NR_bdflush:
10113         goto unimplemented;
10114 #endif
10115 #ifdef TARGET_NR_sysfs
10116     case TARGET_NR_sysfs:
10117         goto unimplemented;
10118 #endif
10119     case TARGET_NR_personality:
10120         ret = get_errno(personality(arg1));
10121         break;
10122 #ifdef TARGET_NR_afs_syscall
10123     case TARGET_NR_afs_syscall:
10124         goto unimplemented;
10125 #endif
10126 #ifdef TARGET_NR__llseek /* Not on alpha */
10127     case TARGET_NR__llseek:
10128         {
10129             int64_t res;
10130 #if !defined(__NR_llseek)
10131             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10132             if (res == -1) {
10133                 ret = get_errno(res);
10134             } else {
10135                 ret = 0;
10136             }
10137 #else
10138             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10139 #endif
10140             if ((ret == 0) && put_user_s64(res, arg4)) {
10141                 goto efault;
10142             }
10143         }
10144         break;
10145 #endif
10146 #ifdef TARGET_NR_getdents
10147     case TARGET_NR_getdents:
10148 #ifdef __NR_getdents
10149 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10150         {
10151             struct target_dirent *target_dirp;
10152             struct linux_dirent *dirp;
10153             abi_long count = arg3;
10154 
10155             dirp = g_try_malloc(count);
10156             if (!dirp) {
10157                 ret = -TARGET_ENOMEM;
10158                 goto fail;
10159             }
10160 
10161             ret = get_errno(sys_getdents(arg1, dirp, count));
10162             if (!is_error(ret)) {
10163                 struct linux_dirent *de;
10164 		struct target_dirent *tde;
10165                 int len = ret;
10166                 int reclen, treclen;
10167 		int count1, tnamelen;
10168 
10169 		count1 = 0;
10170                 de = dirp;
10171                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10172                     goto efault;
10173 		tde = target_dirp;
10174                 while (len > 0) {
10175                     reclen = de->d_reclen;
10176                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10177                     assert(tnamelen >= 0);
10178                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10179                     assert(count1 + treclen <= count);
10180                     tde->d_reclen = tswap16(treclen);
10181                     tde->d_ino = tswapal(de->d_ino);
10182                     tde->d_off = tswapal(de->d_off);
10183                     memcpy(tde->d_name, de->d_name, tnamelen);
10184                     de = (struct linux_dirent *)((char *)de + reclen);
10185                     len -= reclen;
10186                     tde = (struct target_dirent *)((char *)tde + treclen);
10187 		    count1 += treclen;
10188                 }
10189 		ret = count1;
10190                 unlock_user(target_dirp, arg2, ret);
10191             }
10192             g_free(dirp);
10193         }
10194 #else
10195         {
10196             struct linux_dirent *dirp;
10197             abi_long count = arg3;
10198 
10199             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10200                 goto efault;
10201             ret = get_errno(sys_getdents(arg1, dirp, count));
10202             if (!is_error(ret)) {
10203                 struct linux_dirent *de;
10204                 int len = ret;
10205                 int reclen;
10206                 de = dirp;
10207                 while (len > 0) {
10208                     reclen = de->d_reclen;
10209                     if (reclen > len)
10210                         break;
10211                     de->d_reclen = tswap16(reclen);
10212                     tswapls(&de->d_ino);
10213                     tswapls(&de->d_off);
10214                     de = (struct linux_dirent *)((char *)de + reclen);
10215                     len -= reclen;
10216                 }
10217             }
10218             unlock_user(dirp, arg2, ret);
10219         }
10220 #endif
10221 #else
10222         /* Implement getdents in terms of getdents64 */
10223         {
10224             struct linux_dirent64 *dirp;
10225             abi_long count = arg3;
10226 
10227             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10228             if (!dirp) {
10229                 goto efault;
10230             }
10231             ret = get_errno(sys_getdents64(arg1, dirp, count));
10232             if (!is_error(ret)) {
10233                 /* Convert the dirent64 structs to target dirent.  We do this
10234                  * in-place, since we can guarantee that a target_dirent is no
10235                  * larger than a dirent64; however this means we have to be
10236                  * careful to read everything before writing in the new format.
10237                  */
10238                 struct linux_dirent64 *de;
10239                 struct target_dirent *tde;
10240                 int len = ret;
10241                 int tlen = 0;
10242 
10243                 de = dirp;
10244                 tde = (struct target_dirent *)dirp;
10245                 while (len > 0) {
10246                     int namelen, treclen;
10247                     int reclen = de->d_reclen;
10248                     uint64_t ino = de->d_ino;
10249                     int64_t off = de->d_off;
10250                     uint8_t type = de->d_type;
10251 
10252                     namelen = strlen(de->d_name);
10253                     treclen = offsetof(struct target_dirent, d_name)
10254                         + namelen + 2;
10255                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10256 
10257                     memmove(tde->d_name, de->d_name, namelen + 1);
10258                     tde->d_ino = tswapal(ino);
10259                     tde->d_off = tswapal(off);
10260                     tde->d_reclen = tswap16(treclen);
10261                     /* The target_dirent type is in what was formerly a padding
10262                      * byte at the end of the structure:
10263                      */
10264                     *(((char *)tde) + treclen - 1) = type;
10265 
10266                     de = (struct linux_dirent64 *)((char *)de + reclen);
10267                     tde = (struct target_dirent *)((char *)tde + treclen);
10268                     len -= reclen;
10269                     tlen += treclen;
10270                 }
10271                 ret = tlen;
10272             }
10273             unlock_user(dirp, arg2, ret);
10274         }
10275 #endif
10276         break;
10277 #endif /* TARGET_NR_getdents */
10278 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10279     case TARGET_NR_getdents64:
10280         {
10281             struct linux_dirent64 *dirp;
10282             abi_long count = arg3;
10283             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10284                 goto efault;
10285             ret = get_errno(sys_getdents64(arg1, dirp, count));
10286             if (!is_error(ret)) {
10287                 struct linux_dirent64 *de;
10288                 int len = ret;
10289                 int reclen;
10290                 de = dirp;
10291                 while (len > 0) {
10292                     reclen = de->d_reclen;
10293                     if (reclen > len)
10294                         break;
10295                     de->d_reclen = tswap16(reclen);
10296                     tswap64s((uint64_t *)&de->d_ino);
10297                     tswap64s((uint64_t *)&de->d_off);
10298                     de = (struct linux_dirent64 *)((char *)de + reclen);
10299                     len -= reclen;
10300                 }
10301             }
10302             unlock_user(dirp, arg2, ret);
10303         }
10304         break;
10305 #endif /* TARGET_NR_getdents64 */
10306 #if defined(TARGET_NR__newselect)
10307     case TARGET_NR__newselect:
10308         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10309         break;
10310 #endif
10311 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10312 # ifdef TARGET_NR_poll
10313     case TARGET_NR_poll:
10314 # endif
10315 # ifdef TARGET_NR_ppoll
10316     case TARGET_NR_ppoll:
10317 # endif
10318         {
10319             struct target_pollfd *target_pfd;
10320             unsigned int nfds = arg2;
10321             struct pollfd *pfd;
10322             unsigned int i;
10323 
10324             pfd = NULL;
10325             target_pfd = NULL;
10326             if (nfds) {
10327                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10328                     ret = -TARGET_EINVAL;
10329                     break;
10330                 }
10331 
10332                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10333                                        sizeof(struct target_pollfd) * nfds, 1);
10334                 if (!target_pfd) {
10335                     goto efault;
10336                 }
10337 
10338                 pfd = alloca(sizeof(struct pollfd) * nfds);
10339                 for (i = 0; i < nfds; i++) {
10340                     pfd[i].fd = tswap32(target_pfd[i].fd);
10341                     pfd[i].events = tswap16(target_pfd[i].events);
10342                 }
10343             }
10344 
10345             switch (num) {
10346 # ifdef TARGET_NR_ppoll
10347             case TARGET_NR_ppoll:
10348             {
10349                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10350                 target_sigset_t *target_set;
10351                 sigset_t _set, *set = &_set;
10352 
10353                 if (arg3) {
10354                     if (target_to_host_timespec(timeout_ts, arg3)) {
10355                         unlock_user(target_pfd, arg1, 0);
10356                         goto efault;
10357                     }
10358                 } else {
10359                     timeout_ts = NULL;
10360                 }
10361 
10362                 if (arg4) {
10363                     if (arg5 != sizeof(target_sigset_t)) {
10364                         unlock_user(target_pfd, arg1, 0);
10365                         ret = -TARGET_EINVAL;
10366                         break;
10367                     }
10368 
10369                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10370                     if (!target_set) {
10371                         unlock_user(target_pfd, arg1, 0);
10372                         goto efault;
10373                     }
10374                     target_to_host_sigset(set, target_set);
10375                 } else {
10376                     set = NULL;
10377                 }
10378 
10379                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10380                                            set, SIGSET_T_SIZE));
10381 
10382                 if (!is_error(ret) && arg3) {
10383                     host_to_target_timespec(arg3, timeout_ts);
10384                 }
10385                 if (arg4) {
10386                     unlock_user(target_set, arg4, 0);
10387                 }
10388                 break;
10389             }
10390 # endif
10391 # ifdef TARGET_NR_poll
10392             case TARGET_NR_poll:
10393             {
10394                 struct timespec ts, *pts;
10395 
10396                 if (arg3 >= 0) {
10397                     /* Convert ms to secs, ns */
10398                     ts.tv_sec = arg3 / 1000;
10399                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10400                     pts = &ts;
10401                 } else {
10402                     /* -ve poll() timeout means "infinite" */
10403                     pts = NULL;
10404                 }
10405                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10406                 break;
10407             }
10408 # endif
10409             default:
10410                 g_assert_not_reached();
10411             }
10412 
10413             if (!is_error(ret)) {
10414                 for(i = 0; i < nfds; i++) {
10415                     target_pfd[i].revents = tswap16(pfd[i].revents);
10416                 }
10417             }
10418             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10419         }
10420         break;
10421 #endif
10422     case TARGET_NR_flock:
10423         /* NOTE: the flock constant seems to be the same for every
10424            Linux platform */
10425         ret = get_errno(safe_flock(arg1, arg2));
10426         break;
10427     case TARGET_NR_readv:
10428         {
10429             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10430             if (vec != NULL) {
10431                 ret = get_errno(safe_readv(arg1, vec, arg3));
10432                 unlock_iovec(vec, arg2, arg3, 1);
10433             } else {
10434                 ret = -host_to_target_errno(errno);
10435             }
10436         }
10437         break;
10438     case TARGET_NR_writev:
10439         {
10440             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10441             if (vec != NULL) {
10442                 ret = get_errno(safe_writev(arg1, vec, arg3));
10443                 unlock_iovec(vec, arg2, arg3, 0);
10444             } else {
10445                 ret = -host_to_target_errno(errno);
10446             }
10447         }
10448         break;
10449 #if defined(TARGET_NR_preadv)
10450     case TARGET_NR_preadv:
10451         {
10452             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10453             if (vec != NULL) {
10454                 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10455                 unlock_iovec(vec, arg2, arg3, 1);
10456             } else {
10457                 ret = -host_to_target_errno(errno);
10458            }
10459         }
10460         break;
10461 #endif
10462 #if defined(TARGET_NR_pwritev)
10463     case TARGET_NR_pwritev:
10464         {
10465             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10466             if (vec != NULL) {
10467                 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10468                 unlock_iovec(vec, arg2, arg3, 0);
10469             } else {
10470                 ret = -host_to_target_errno(errno);
10471            }
10472         }
10473         break;
10474 #endif
10475     case TARGET_NR_getsid:
10476         ret = get_errno(getsid(arg1));
10477         break;
10478 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10479     case TARGET_NR_fdatasync:
10480         ret = get_errno(fdatasync(arg1));
10481         break;
10482 #endif
10483 #ifdef TARGET_NR__sysctl
10484     case TARGET_NR__sysctl:
10485         /* We don't implement this, but ENOTDIR is always a safe
10486            return value. */
10487         ret = -TARGET_ENOTDIR;
10488         break;
10489 #endif
10490     case TARGET_NR_sched_getaffinity:
10491         {
10492             unsigned int mask_size;
10493             unsigned long *mask;
10494 
10495             /*
10496              * sched_getaffinity needs multiples of ulong, so need to take
10497              * care of mismatches between target ulong and host ulong sizes.
10498              */
10499             if (arg2 & (sizeof(abi_ulong) - 1)) {
10500                 ret = -TARGET_EINVAL;
10501                 break;
10502             }
10503             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10504 
10505             mask = alloca(mask_size);
10506             memset(mask, 0, mask_size);
10507             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10508 
10509             if (!is_error(ret)) {
10510                 if (ret > arg2) {
10511                     /* More data returned than the caller's buffer will fit.
10512                      * This only happens if sizeof(abi_long) < sizeof(long)
10513                      * and the caller passed us a buffer holding an odd number
10514                      * of abi_longs. If the host kernel is actually using the
10515                      * extra 4 bytes then fail EINVAL; otherwise we can just
10516                      * ignore them and only copy the interesting part.
10517                      */
10518                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10519                     if (numcpus > arg2 * 8) {
10520                         ret = -TARGET_EINVAL;
10521                         break;
10522                     }
10523                     ret = arg2;
10524                 }
10525 
10526                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10527                     goto efault;
10528                 }
10529             }
10530         }
10531         break;
10532     case TARGET_NR_sched_setaffinity:
10533         {
10534             unsigned int mask_size;
10535             unsigned long *mask;
10536 
10537             /*
10538              * sched_setaffinity needs multiples of ulong, so need to take
10539              * care of mismatches between target ulong and host ulong sizes.
10540              */
10541             if (arg2 & (sizeof(abi_ulong) - 1)) {
10542                 ret = -TARGET_EINVAL;
10543                 break;
10544             }
10545             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10546             mask = alloca(mask_size);
10547 
10548             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10549             if (ret) {
10550                 break;
10551             }
10552 
10553             ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10554         }
10555         break;
10556     case TARGET_NR_getcpu:
10557         {
10558             unsigned cpu, node;
10559             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10560                                        arg2 ? &node : NULL,
10561                                        NULL));
10562             if (is_error(ret)) {
10563                 goto fail;
10564             }
10565             if (arg1 && put_user_u32(cpu, arg1)) {
10566                 goto efault;
10567             }
10568             if (arg2 && put_user_u32(node, arg2)) {
10569                 goto efault;
10570             }
10571         }
10572         break;
10573     case TARGET_NR_sched_setparam:
10574         {
10575             struct sched_param *target_schp;
10576             struct sched_param schp;
10577 
10578             if (arg2 == 0) {
10579                 return -TARGET_EINVAL;
10580             }
10581             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10582                 goto efault;
10583             schp.sched_priority = tswap32(target_schp->sched_priority);
10584             unlock_user_struct(target_schp, arg2, 0);
10585             ret = get_errno(sched_setparam(arg1, &schp));
10586         }
10587         break;
10588     case TARGET_NR_sched_getparam:
10589         {
10590             struct sched_param *target_schp;
10591             struct sched_param schp;
10592 
10593             if (arg2 == 0) {
10594                 return -TARGET_EINVAL;
10595             }
10596             ret = get_errno(sched_getparam(arg1, &schp));
10597             if (!is_error(ret)) {
10598                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10599                     goto efault;
10600                 target_schp->sched_priority = tswap32(schp.sched_priority);
10601                 unlock_user_struct(target_schp, arg2, 1);
10602             }
10603         }
10604         break;
10605     case TARGET_NR_sched_setscheduler:
10606         {
10607             struct sched_param *target_schp;
10608             struct sched_param schp;
10609             if (arg3 == 0) {
10610                 return -TARGET_EINVAL;
10611             }
10612             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10613                 goto efault;
10614             schp.sched_priority = tswap32(target_schp->sched_priority);
10615             unlock_user_struct(target_schp, arg3, 0);
10616             ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10617         }
10618         break;
10619     case TARGET_NR_sched_getscheduler:
10620         ret = get_errno(sched_getscheduler(arg1));
10621         break;
10622     case TARGET_NR_sched_yield:
10623         ret = get_errno(sched_yield());
10624         break;
10625     case TARGET_NR_sched_get_priority_max:
10626         ret = get_errno(sched_get_priority_max(arg1));
10627         break;
10628     case TARGET_NR_sched_get_priority_min:
10629         ret = get_errno(sched_get_priority_min(arg1));
10630         break;
10631     case TARGET_NR_sched_rr_get_interval:
10632         {
10633             struct timespec ts;
10634             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10635             if (!is_error(ret)) {
10636                 ret = host_to_target_timespec(arg2, &ts);
10637             }
10638         }
10639         break;
10640     case TARGET_NR_nanosleep:
10641         {
10642             struct timespec req, rem;
10643             target_to_host_timespec(&req, arg1);
10644             ret = get_errno(safe_nanosleep(&req, &rem));
10645             if (is_error(ret) && arg2) {
10646                 host_to_target_timespec(arg2, &rem);
10647             }
10648         }
10649         break;
10650 #ifdef TARGET_NR_query_module
10651     case TARGET_NR_query_module:
10652         goto unimplemented;
10653 #endif
10654 #ifdef TARGET_NR_nfsservctl
10655     case TARGET_NR_nfsservctl:
10656         goto unimplemented;
10657 #endif
10658     case TARGET_NR_prctl:
10659         switch (arg1) {
10660         case PR_GET_PDEATHSIG:
10661         {
10662             int deathsig;
10663             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10664             if (!is_error(ret) && arg2
10665                 && put_user_ual(deathsig, arg2)) {
10666                 goto efault;
10667             }
10668             break;
10669         }
10670 #ifdef PR_GET_NAME
10671         case PR_GET_NAME:
10672         {
10673             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10674             if (!name) {
10675                 goto efault;
10676             }
10677             ret = get_errno(prctl(arg1, (unsigned long)name,
10678                                   arg3, arg4, arg5));
10679             unlock_user(name, arg2, 16);
10680             break;
10681         }
10682         case PR_SET_NAME:
10683         {
10684             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10685             if (!name) {
10686                 goto efault;
10687             }
10688             ret = get_errno(prctl(arg1, (unsigned long)name,
10689                                   arg3, arg4, arg5));
10690             unlock_user(name, arg2, 0);
10691             break;
10692         }
10693 #endif
10694 #ifdef TARGET_AARCH64
10695         case TARGET_PR_SVE_SET_VL:
10696             /* We cannot support either PR_SVE_SET_VL_ONEXEC
10697                or PR_SVE_VL_INHERIT.  Therefore, anything above
10698                ARM_MAX_VQ results in EINVAL.  */
10699             ret = -TARGET_EINVAL;
10700             if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10701                 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10702                 CPUARMState *env = cpu_env;
10703                 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10704                 int vq = MAX(arg2 / 16, 1);
10705 
10706                 if (vq < old_vq) {
10707                     aarch64_sve_narrow_vq(env, vq);
10708                 }
10709                 env->vfp.zcr_el[1] = vq - 1;
10710                 ret = vq * 16;
10711             }
10712             break;
10713         case TARGET_PR_SVE_GET_VL:
10714             ret = -TARGET_EINVAL;
10715             if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10716                 CPUARMState *env = cpu_env;
10717                 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10718             }
10719             break;
10720 #endif /* AARCH64 */
10721         case PR_GET_SECCOMP:
10722         case PR_SET_SECCOMP:
10723             /* Disable seccomp to prevent the target disabling syscalls we
10724              * need. */
10725             ret = -TARGET_EINVAL;
10726             break;
10727         default:
10728             /* Most prctl options have no pointer arguments */
10729             ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10730             break;
10731         }
10732         break;
10733 #ifdef TARGET_NR_arch_prctl
10734     case TARGET_NR_arch_prctl:
10735 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10736         ret = do_arch_prctl(cpu_env, arg1, arg2);
10737         break;
10738 #else
10739         goto unimplemented;
10740 #endif
10741 #endif
10742 #ifdef TARGET_NR_pread64
10743     case TARGET_NR_pread64:
10744         if (regpairs_aligned(cpu_env, num)) {
10745             arg4 = arg5;
10746             arg5 = arg6;
10747         }
10748         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10749             goto efault;
10750         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10751         unlock_user(p, arg2, ret);
10752         break;
10753     case TARGET_NR_pwrite64:
10754         if (regpairs_aligned(cpu_env, num)) {
10755             arg4 = arg5;
10756             arg5 = arg6;
10757         }
10758         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10759             goto efault;
10760         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10761         unlock_user(p, arg2, 0);
10762         break;
10763 #endif
10764     case TARGET_NR_getcwd:
10765         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10766             goto efault;
10767         ret = get_errno(sys_getcwd1(p, arg2));
10768         unlock_user(p, arg1, ret);
10769         break;
10770     case TARGET_NR_capget:
10771     case TARGET_NR_capset:
10772     {
10773         struct target_user_cap_header *target_header;
10774         struct target_user_cap_data *target_data = NULL;
10775         struct __user_cap_header_struct header;
10776         struct __user_cap_data_struct data[2];
10777         struct __user_cap_data_struct *dataptr = NULL;
10778         int i, target_datalen;
10779         int data_items = 1;
10780 
10781         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10782             goto efault;
10783         }
10784         header.version = tswap32(target_header->version);
10785         header.pid = tswap32(target_header->pid);
10786 
10787         if (header.version != _LINUX_CAPABILITY_VERSION) {
10788             /* Version 2 and up takes pointer to two user_data structs */
10789             data_items = 2;
10790         }
10791 
10792         target_datalen = sizeof(*target_data) * data_items;
10793 
10794         if (arg2) {
10795             if (num == TARGET_NR_capget) {
10796                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10797             } else {
10798                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10799             }
10800             if (!target_data) {
10801                 unlock_user_struct(target_header, arg1, 0);
10802                 goto efault;
10803             }
10804 
10805             if (num == TARGET_NR_capset) {
10806                 for (i = 0; i < data_items; i++) {
10807                     data[i].effective = tswap32(target_data[i].effective);
10808                     data[i].permitted = tswap32(target_data[i].permitted);
10809                     data[i].inheritable = tswap32(target_data[i].inheritable);
10810                 }
10811             }
10812 
10813             dataptr = data;
10814         }
10815 
10816         if (num == TARGET_NR_capget) {
10817             ret = get_errno(capget(&header, dataptr));
10818         } else {
10819             ret = get_errno(capset(&header, dataptr));
10820         }
10821 
10822         /* The kernel always updates version for both capget and capset */
10823         target_header->version = tswap32(header.version);
10824         unlock_user_struct(target_header, arg1, 1);
10825 
10826         if (arg2) {
10827             if (num == TARGET_NR_capget) {
10828                 for (i = 0; i < data_items; i++) {
10829                     target_data[i].effective = tswap32(data[i].effective);
10830                     target_data[i].permitted = tswap32(data[i].permitted);
10831                     target_data[i].inheritable = tswap32(data[i].inheritable);
10832                 }
10833                 unlock_user(target_data, arg2, target_datalen);
10834             } else {
10835                 unlock_user(target_data, arg2, 0);
10836             }
10837         }
10838         break;
10839     }
10840     case TARGET_NR_sigaltstack:
10841         ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10842         break;
10843 
10844 #ifdef CONFIG_SENDFILE
10845     case TARGET_NR_sendfile:
10846     {
10847         off_t *offp = NULL;
10848         off_t off;
10849         if (arg3) {
10850             ret = get_user_sal(off, arg3);
10851             if (is_error(ret)) {
10852                 break;
10853             }
10854             offp = &off;
10855         }
10856         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10857         if (!is_error(ret) && arg3) {
10858             abi_long ret2 = put_user_sal(off, arg3);
10859             if (is_error(ret2)) {
10860                 ret = ret2;
10861             }
10862         }
10863         break;
10864     }
10865 #ifdef TARGET_NR_sendfile64
10866     case TARGET_NR_sendfile64:
10867     {
10868         off_t *offp = NULL;
10869         off_t off;
10870         if (arg3) {
10871             ret = get_user_s64(off, arg3);
10872             if (is_error(ret)) {
10873                 break;
10874             }
10875             offp = &off;
10876         }
10877         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10878         if (!is_error(ret) && arg3) {
10879             abi_long ret2 = put_user_s64(off, arg3);
10880             if (is_error(ret2)) {
10881                 ret = ret2;
10882             }
10883         }
10884         break;
10885     }
10886 #endif
10887 #else
10888     case TARGET_NR_sendfile:
10889 #ifdef TARGET_NR_sendfile64
10890     case TARGET_NR_sendfile64:
10891 #endif
10892         goto unimplemented;
10893 #endif
10894 
10895 #ifdef TARGET_NR_getpmsg
10896     case TARGET_NR_getpmsg:
10897         goto unimplemented;
10898 #endif
10899 #ifdef TARGET_NR_putpmsg
10900     case TARGET_NR_putpmsg:
10901         goto unimplemented;
10902 #endif
10903 #ifdef TARGET_NR_vfork
10904     case TARGET_NR_vfork:
10905         ret = get_errno(do_fork(cpu_env,
10906                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10907                         0, 0, 0, 0));
10908         break;
10909 #endif
10910 #ifdef TARGET_NR_ugetrlimit
10911     case TARGET_NR_ugetrlimit:
10912     {
10913 	struct rlimit rlim;
10914 	int resource = target_to_host_resource(arg1);
10915 	ret = get_errno(getrlimit(resource, &rlim));
10916 	if (!is_error(ret)) {
10917 	    struct target_rlimit *target_rlim;
10918             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10919                 goto efault;
10920 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10921 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10922             unlock_user_struct(target_rlim, arg2, 1);
10923 	}
10924 	break;
10925     }
10926 #endif
10927 #ifdef TARGET_NR_truncate64
10928     case TARGET_NR_truncate64:
10929         if (!(p = lock_user_string(arg1)))
10930             goto efault;
10931 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10932         unlock_user(p, arg1, 0);
10933 	break;
10934 #endif
10935 #ifdef TARGET_NR_ftruncate64
10936     case TARGET_NR_ftruncate64:
10937 	ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10938 	break;
10939 #endif
10940 #ifdef TARGET_NR_stat64
10941     case TARGET_NR_stat64:
10942         if (!(p = lock_user_string(arg1)))
10943             goto efault;
10944         ret = get_errno(stat(path(p), &st));
10945         unlock_user(p, arg1, 0);
10946         if (!is_error(ret))
10947             ret = host_to_target_stat64(cpu_env, arg2, &st);
10948         break;
10949 #endif
10950 #ifdef TARGET_NR_lstat64
10951     case TARGET_NR_lstat64:
10952         if (!(p = lock_user_string(arg1)))
10953             goto efault;
10954         ret = get_errno(lstat(path(p), &st));
10955         unlock_user(p, arg1, 0);
10956         if (!is_error(ret))
10957             ret = host_to_target_stat64(cpu_env, arg2, &st);
10958         break;
10959 #endif
10960 #ifdef TARGET_NR_fstat64
10961     case TARGET_NR_fstat64:
10962         ret = get_errno(fstat(arg1, &st));
10963         if (!is_error(ret))
10964             ret = host_to_target_stat64(cpu_env, arg2, &st);
10965         break;
10966 #endif
10967 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10968 #ifdef TARGET_NR_fstatat64
10969     case TARGET_NR_fstatat64:
10970 #endif
10971 #ifdef TARGET_NR_newfstatat
10972     case TARGET_NR_newfstatat:
10973 #endif
10974         if (!(p = lock_user_string(arg2)))
10975             goto efault;
10976         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10977         if (!is_error(ret))
10978             ret = host_to_target_stat64(cpu_env, arg3, &st);
10979         break;
10980 #endif
10981 #ifdef TARGET_NR_lchown
10982     case TARGET_NR_lchown:
10983         if (!(p = lock_user_string(arg1)))
10984             goto efault;
10985         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10986         unlock_user(p, arg1, 0);
10987         break;
10988 #endif
10989 #ifdef TARGET_NR_getuid
10990     case TARGET_NR_getuid:
10991         ret = get_errno(high2lowuid(getuid()));
10992         break;
10993 #endif
10994 #ifdef TARGET_NR_getgid
10995     case TARGET_NR_getgid:
10996         ret = get_errno(high2lowgid(getgid()));
10997         break;
10998 #endif
10999 #ifdef TARGET_NR_geteuid
11000     case TARGET_NR_geteuid:
11001         ret = get_errno(high2lowuid(geteuid()));
11002         break;
11003 #endif
11004 #ifdef TARGET_NR_getegid
11005     case TARGET_NR_getegid:
11006         ret = get_errno(high2lowgid(getegid()));
11007         break;
11008 #endif
11009     case TARGET_NR_setreuid:
11010         ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11011         break;
11012     case TARGET_NR_setregid:
11013         ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11014         break;
11015     case TARGET_NR_getgroups:
11016         {
11017             int gidsetsize = arg1;
11018             target_id *target_grouplist;
11019             gid_t *grouplist;
11020             int i;
11021 
11022             grouplist = alloca(gidsetsize * sizeof(gid_t));
11023             ret = get_errno(getgroups(gidsetsize, grouplist));
11024             if (gidsetsize == 0)
11025                 break;
11026             if (!is_error(ret)) {
11027                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11028                 if (!target_grouplist)
11029                     goto efault;
11030                 for(i = 0;i < ret; i++)
11031                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11032                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11033             }
11034         }
11035         break;
11036     case TARGET_NR_setgroups:
11037         {
11038             int gidsetsize = arg1;
11039             target_id *target_grouplist;
11040             gid_t *grouplist = NULL;
11041             int i;
11042             if (gidsetsize) {
11043                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11044                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11045                 if (!target_grouplist) {
11046                     ret = -TARGET_EFAULT;
11047                     goto fail;
11048                 }
11049                 for (i = 0; i < gidsetsize; i++) {
11050                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11051                 }
11052                 unlock_user(target_grouplist, arg2, 0);
11053             }
11054             ret = get_errno(setgroups(gidsetsize, grouplist));
11055         }
11056         break;
11057     case TARGET_NR_fchown:
11058         ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11059         break;
11060 #if defined(TARGET_NR_fchownat)
11061     case TARGET_NR_fchownat:
11062         if (!(p = lock_user_string(arg2)))
11063             goto efault;
11064         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11065                                  low2highgid(arg4), arg5));
11066         unlock_user(p, arg2, 0);
11067         break;
11068 #endif
11069 #ifdef TARGET_NR_setresuid
11070     case TARGET_NR_setresuid:
11071         ret = get_errno(sys_setresuid(low2highuid(arg1),
11072                                       low2highuid(arg2),
11073                                       low2highuid(arg3)));
11074         break;
11075 #endif
11076 #ifdef TARGET_NR_getresuid
11077     case TARGET_NR_getresuid:
11078         {
11079             uid_t ruid, euid, suid;
11080             ret = get_errno(getresuid(&ruid, &euid, &suid));
11081             if (!is_error(ret)) {
11082                 if (put_user_id(high2lowuid(ruid), arg1)
11083                     || put_user_id(high2lowuid(euid), arg2)
11084                     || put_user_id(high2lowuid(suid), arg3))
11085                     goto efault;
11086             }
11087         }
11088         break;
11089 #endif
11090 #ifdef TARGET_NR_getresgid
11091     case TARGET_NR_setresgid:
11092         ret = get_errno(sys_setresgid(low2highgid(arg1),
11093                                       low2highgid(arg2),
11094                                       low2highgid(arg3)));
11095         break;
11096 #endif
11097 #ifdef TARGET_NR_getresgid
11098     case TARGET_NR_getresgid:
11099         {
11100             gid_t rgid, egid, sgid;
11101             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11102             if (!is_error(ret)) {
11103                 if (put_user_id(high2lowgid(rgid), arg1)
11104                     || put_user_id(high2lowgid(egid), arg2)
11105                     || put_user_id(high2lowgid(sgid), arg3))
11106                     goto efault;
11107             }
11108         }
11109         break;
11110 #endif
11111 #ifdef TARGET_NR_chown
11112     case TARGET_NR_chown:
11113         if (!(p = lock_user_string(arg1)))
11114             goto efault;
11115         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11116         unlock_user(p, arg1, 0);
11117         break;
11118 #endif
11119     case TARGET_NR_setuid:
11120         ret = get_errno(sys_setuid(low2highuid(arg1)));
11121         break;
11122     case TARGET_NR_setgid:
11123         ret = get_errno(sys_setgid(low2highgid(arg1)));
11124         break;
11125     case TARGET_NR_setfsuid:
11126         ret = get_errno(setfsuid(arg1));
11127         break;
11128     case TARGET_NR_setfsgid:
11129         ret = get_errno(setfsgid(arg1));
11130         break;
11131 
11132 #ifdef TARGET_NR_lchown32
11133     case TARGET_NR_lchown32:
11134         if (!(p = lock_user_string(arg1)))
11135             goto efault;
11136         ret = get_errno(lchown(p, arg2, arg3));
11137         unlock_user(p, arg1, 0);
11138         break;
11139 #endif
11140 #ifdef TARGET_NR_getuid32
11141     case TARGET_NR_getuid32:
11142         ret = get_errno(getuid());
11143         break;
11144 #endif
11145 
11146 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11147    /* Alpha specific */
11148     case TARGET_NR_getxuid:
11149          {
11150             uid_t euid;
11151             euid=geteuid();
11152             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11153          }
11154         ret = get_errno(getuid());
11155         break;
11156 #endif
11157 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11158    /* Alpha specific */
11159     case TARGET_NR_getxgid:
11160          {
11161             uid_t egid;
11162             egid=getegid();
11163             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11164          }
11165         ret = get_errno(getgid());
11166         break;
11167 #endif
11168 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11169     /* Alpha specific */
11170     case TARGET_NR_osf_getsysinfo:
11171         ret = -TARGET_EOPNOTSUPP;
11172         switch (arg1) {
11173           case TARGET_GSI_IEEE_FP_CONTROL:
11174             {
11175                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11176 
11177                 /* Copied from linux ieee_fpcr_to_swcr.  */
11178                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11179                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11180                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11181                                         | SWCR_TRAP_ENABLE_DZE
11182                                         | SWCR_TRAP_ENABLE_OVF);
11183                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11184                                         | SWCR_TRAP_ENABLE_INE);
11185                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11186                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11187 
11188                 if (put_user_u64 (swcr, arg2))
11189                         goto efault;
11190                 ret = 0;
11191             }
11192             break;
11193 
11194           /* case GSI_IEEE_STATE_AT_SIGNAL:
11195              -- Not implemented in linux kernel.
11196              case GSI_UACPROC:
11197              -- Retrieves current unaligned access state; not much used.
11198              case GSI_PROC_TYPE:
11199              -- Retrieves implver information; surely not used.
11200              case GSI_GET_HWRPB:
11201              -- Grabs a copy of the HWRPB; surely not used.
11202           */
11203         }
11204         break;
11205 #endif
11206 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11207     /* Alpha specific */
11208     case TARGET_NR_osf_setsysinfo:
11209         ret = -TARGET_EOPNOTSUPP;
11210         switch (arg1) {
11211           case TARGET_SSI_IEEE_FP_CONTROL:
11212             {
11213                 uint64_t swcr, fpcr, orig_fpcr;
11214 
11215                 if (get_user_u64 (swcr, arg2)) {
11216                     goto efault;
11217                 }
11218                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11219                 fpcr = orig_fpcr & FPCR_DYN_MASK;
11220 
11221                 /* Copied from linux ieee_swcr_to_fpcr.  */
11222                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11223                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11224                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11225                                   | SWCR_TRAP_ENABLE_DZE
11226                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
11227                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11228                                   | SWCR_TRAP_ENABLE_INE)) << 57;
11229                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11230                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11231 
11232                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11233                 ret = 0;
11234             }
11235             break;
11236 
11237           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11238             {
11239                 uint64_t exc, fpcr, orig_fpcr;
11240                 int si_code;
11241 
11242                 if (get_user_u64(exc, arg2)) {
11243                     goto efault;
11244                 }
11245 
11246                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11247 
11248                 /* We only add to the exception status here.  */
11249                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11250 
11251                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11252                 ret = 0;
11253 
11254                 /* Old exceptions are not signaled.  */
11255                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11256 
11257                 /* If any exceptions set by this call,
11258                    and are unmasked, send a signal.  */
11259                 si_code = 0;
11260                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11261                     si_code = TARGET_FPE_FLTRES;
11262                 }
11263                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11264                     si_code = TARGET_FPE_FLTUND;
11265                 }
11266                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11267                     si_code = TARGET_FPE_FLTOVF;
11268                 }
11269                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11270                     si_code = TARGET_FPE_FLTDIV;
11271                 }
11272                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11273                     si_code = TARGET_FPE_FLTINV;
11274                 }
11275                 if (si_code != 0) {
11276                     target_siginfo_t info;
11277                     info.si_signo = SIGFPE;
11278                     info.si_errno = 0;
11279                     info.si_code = si_code;
11280                     info._sifields._sigfault._addr
11281                         = ((CPUArchState *)cpu_env)->pc;
11282                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11283                                  QEMU_SI_FAULT, &info);
11284                 }
11285             }
11286             break;
11287 
11288           /* case SSI_NVPAIRS:
11289              -- Used with SSIN_UACPROC to enable unaligned accesses.
11290              case SSI_IEEE_STATE_AT_SIGNAL:
11291              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11292              -- Not implemented in linux kernel
11293           */
11294         }
11295         break;
11296 #endif
11297 #ifdef TARGET_NR_osf_sigprocmask
11298     /* Alpha specific.  */
11299     case TARGET_NR_osf_sigprocmask:
11300         {
11301             abi_ulong mask;
11302             int how;
11303             sigset_t set, oldset;
11304 
11305             switch(arg1) {
11306             case TARGET_SIG_BLOCK:
11307                 how = SIG_BLOCK;
11308                 break;
11309             case TARGET_SIG_UNBLOCK:
11310                 how = SIG_UNBLOCK;
11311                 break;
11312             case TARGET_SIG_SETMASK:
11313                 how = SIG_SETMASK;
11314                 break;
11315             default:
11316                 ret = -TARGET_EINVAL;
11317                 goto fail;
11318             }
11319             mask = arg2;
11320             target_to_host_old_sigset(&set, &mask);
11321             ret = do_sigprocmask(how, &set, &oldset);
11322             if (!ret) {
11323                 host_to_target_old_sigset(&mask, &oldset);
11324                 ret = mask;
11325             }
11326         }
11327         break;
11328 #endif
11329 
11330 #ifdef TARGET_NR_getgid32
11331     case TARGET_NR_getgid32:
11332         ret = get_errno(getgid());
11333         break;
11334 #endif
11335 #ifdef TARGET_NR_geteuid32
11336     case TARGET_NR_geteuid32:
11337         ret = get_errno(geteuid());
11338         break;
11339 #endif
11340 #ifdef TARGET_NR_getegid32
11341     case TARGET_NR_getegid32:
11342         ret = get_errno(getegid());
11343         break;
11344 #endif
11345 #ifdef TARGET_NR_setreuid32
11346     case TARGET_NR_setreuid32:
11347         ret = get_errno(setreuid(arg1, arg2));
11348         break;
11349 #endif
11350 #ifdef TARGET_NR_setregid32
11351     case TARGET_NR_setregid32:
11352         ret = get_errno(setregid(arg1, arg2));
11353         break;
11354 #endif
11355 #ifdef TARGET_NR_getgroups32
11356     case TARGET_NR_getgroups32:
11357         {
11358             int gidsetsize = arg1;
11359             uint32_t *target_grouplist;
11360             gid_t *grouplist;
11361             int i;
11362 
11363             grouplist = alloca(gidsetsize * sizeof(gid_t));
11364             ret = get_errno(getgroups(gidsetsize, grouplist));
11365             if (gidsetsize == 0)
11366                 break;
11367             if (!is_error(ret)) {
11368                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11369                 if (!target_grouplist) {
11370                     ret = -TARGET_EFAULT;
11371                     goto fail;
11372                 }
11373                 for(i = 0;i < ret; i++)
11374                     target_grouplist[i] = tswap32(grouplist[i]);
11375                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11376             }
11377         }
11378         break;
11379 #endif
11380 #ifdef TARGET_NR_setgroups32
11381     case TARGET_NR_setgroups32:
11382         {
11383             int gidsetsize = arg1;
11384             uint32_t *target_grouplist;
11385             gid_t *grouplist;
11386             int i;
11387 
11388             grouplist = alloca(gidsetsize * sizeof(gid_t));
11389             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11390             if (!target_grouplist) {
11391                 ret = -TARGET_EFAULT;
11392                 goto fail;
11393             }
11394             for(i = 0;i < gidsetsize; i++)
11395                 grouplist[i] = tswap32(target_grouplist[i]);
11396             unlock_user(target_grouplist, arg2, 0);
11397             ret = get_errno(setgroups(gidsetsize, grouplist));
11398         }
11399         break;
11400 #endif
11401 #ifdef TARGET_NR_fchown32
11402     case TARGET_NR_fchown32:
11403         ret = get_errno(fchown(arg1, arg2, arg3));
11404         break;
11405 #endif
11406 #ifdef TARGET_NR_setresuid32
11407     case TARGET_NR_setresuid32:
11408         ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11409         break;
11410 #endif
11411 #ifdef TARGET_NR_getresuid32
11412     case TARGET_NR_getresuid32:
11413         {
11414             uid_t ruid, euid, suid;
11415             ret = get_errno(getresuid(&ruid, &euid, &suid));
11416             if (!is_error(ret)) {
11417                 if (put_user_u32(ruid, arg1)
11418                     || put_user_u32(euid, arg2)
11419                     || put_user_u32(suid, arg3))
11420                     goto efault;
11421             }
11422         }
11423         break;
11424 #endif
11425 #ifdef TARGET_NR_setresgid32
11426     case TARGET_NR_setresgid32:
11427         ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11428         break;
11429 #endif
11430 #ifdef TARGET_NR_getresgid32
11431     case TARGET_NR_getresgid32:
11432         {
11433             gid_t rgid, egid, sgid;
11434             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11435             if (!is_error(ret)) {
11436                 if (put_user_u32(rgid, arg1)
11437                     || put_user_u32(egid, arg2)
11438                     || put_user_u32(sgid, arg3))
11439                     goto efault;
11440             }
11441         }
11442         break;
11443 #endif
11444 #ifdef TARGET_NR_chown32
11445     case TARGET_NR_chown32:
11446         if (!(p = lock_user_string(arg1)))
11447             goto efault;
11448         ret = get_errno(chown(p, arg2, arg3));
11449         unlock_user(p, arg1, 0);
11450         break;
11451 #endif
11452 #ifdef TARGET_NR_setuid32
11453     case TARGET_NR_setuid32:
11454         ret = get_errno(sys_setuid(arg1));
11455         break;
11456 #endif
11457 #ifdef TARGET_NR_setgid32
11458     case TARGET_NR_setgid32:
11459         ret = get_errno(sys_setgid(arg1));
11460         break;
11461 #endif
11462 #ifdef TARGET_NR_setfsuid32
11463     case TARGET_NR_setfsuid32:
11464         ret = get_errno(setfsuid(arg1));
11465         break;
11466 #endif
11467 #ifdef TARGET_NR_setfsgid32
11468     case TARGET_NR_setfsgid32:
11469         ret = get_errno(setfsgid(arg1));
11470         break;
11471 #endif
11472 
11473     case TARGET_NR_pivot_root:
11474         goto unimplemented;
11475 #ifdef TARGET_NR_mincore
11476     case TARGET_NR_mincore:
11477         {
11478             void *a;
11479             ret = -TARGET_ENOMEM;
11480             a = lock_user(VERIFY_READ, arg1, arg2, 0);
11481             if (!a) {
11482                 goto fail;
11483             }
11484             ret = -TARGET_EFAULT;
11485             p = lock_user_string(arg3);
11486             if (!p) {
11487                 goto mincore_fail;
11488             }
11489             ret = get_errno(mincore(a, arg2, p));
11490             unlock_user(p, arg3, ret);
11491             mincore_fail:
11492             unlock_user(a, arg1, 0);
11493         }
11494         break;
11495 #endif
11496 #ifdef TARGET_NR_arm_fadvise64_64
11497     case TARGET_NR_arm_fadvise64_64:
11498         /* arm_fadvise64_64 looks like fadvise64_64 but
11499          * with different argument order: fd, advice, offset, len
11500          * rather than the usual fd, offset, len, advice.
11501          * Note that offset and len are both 64-bit so appear as
11502          * pairs of 32-bit registers.
11503          */
11504         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11505                             target_offset64(arg5, arg6), arg2);
11506         ret = -host_to_target_errno(ret);
11507         break;
11508 #endif
11509 
11510 #if TARGET_ABI_BITS == 32
11511 
11512 #ifdef TARGET_NR_fadvise64_64
11513     case TARGET_NR_fadvise64_64:
11514 #if defined(TARGET_PPC)
11515         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11516         ret = arg2;
11517         arg2 = arg3;
11518         arg3 = arg4;
11519         arg4 = arg5;
11520         arg5 = arg6;
11521         arg6 = ret;
11522 #else
11523         /* 6 args: fd, offset (high, low), len (high, low), advice */
11524         if (regpairs_aligned(cpu_env, num)) {
11525             /* offset is in (3,4), len in (5,6) and advice in 7 */
11526             arg2 = arg3;
11527             arg3 = arg4;
11528             arg4 = arg5;
11529             arg5 = arg6;
11530             arg6 = arg7;
11531         }
11532 #endif
11533         ret = -host_to_target_errno(posix_fadvise(arg1,
11534                                                   target_offset64(arg2, arg3),
11535                                                   target_offset64(arg4, arg5),
11536                                                   arg6));
11537         break;
11538 #endif
11539 
11540 #ifdef TARGET_NR_fadvise64
11541     case TARGET_NR_fadvise64:
11542         /* 5 args: fd, offset (high, low), len, advice */
11543         if (regpairs_aligned(cpu_env, num)) {
11544             /* offset is in (3,4), len in 5 and advice in 6 */
11545             arg2 = arg3;
11546             arg3 = arg4;
11547             arg4 = arg5;
11548             arg5 = arg6;
11549         }
11550         ret = -host_to_target_errno(posix_fadvise(arg1,
11551                                                   target_offset64(arg2, arg3),
11552                                                   arg4, arg5));
11553         break;
11554 #endif
11555 
11556 #else /* not a 32-bit ABI */
11557 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11558 #ifdef TARGET_NR_fadvise64_64
11559     case TARGET_NR_fadvise64_64:
11560 #endif
11561 #ifdef TARGET_NR_fadvise64
11562     case TARGET_NR_fadvise64:
11563 #endif
11564 #ifdef TARGET_S390X
11565         switch (arg4) {
11566         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11567         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11568         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11569         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11570         default: break;
11571         }
11572 #endif
11573         ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11574         break;
11575 #endif
11576 #endif /* end of 64-bit ABI fadvise handling */
11577 
11578 #ifdef TARGET_NR_madvise
11579     case TARGET_NR_madvise:
11580         /* A straight passthrough may not be safe because qemu sometimes
11581            turns private file-backed mappings into anonymous mappings.
11582            This will break MADV_DONTNEED.
11583            This is a hint, so ignoring and returning success is ok.  */
11584         ret = get_errno(0);
11585         break;
11586 #endif
11587 #if TARGET_ABI_BITS == 32
11588     case TARGET_NR_fcntl64:
11589     {
11590 	int cmd;
11591 	struct flock64 fl;
11592         from_flock64_fn *copyfrom = copy_from_user_flock64;
11593         to_flock64_fn *copyto = copy_to_user_flock64;
11594 
11595 #ifdef TARGET_ARM
11596         if (((CPUARMState *)cpu_env)->eabi) {
11597             copyfrom = copy_from_user_eabi_flock64;
11598             copyto = copy_to_user_eabi_flock64;
11599         }
11600 #endif
11601 
11602 	cmd = target_to_host_fcntl_cmd(arg2);
11603         if (cmd == -TARGET_EINVAL) {
11604             ret = cmd;
11605             break;
11606         }
11607 
11608         switch(arg2) {
11609         case TARGET_F_GETLK64:
11610             ret = copyfrom(&fl, arg3);
11611             if (ret) {
11612                 break;
11613             }
11614             ret = get_errno(fcntl(arg1, cmd, &fl));
11615             if (ret == 0) {
11616                 ret = copyto(arg3, &fl);
11617             }
11618 	    break;
11619 
11620         case TARGET_F_SETLK64:
11621         case TARGET_F_SETLKW64:
11622             ret = copyfrom(&fl, arg3);
11623             if (ret) {
11624                 break;
11625             }
11626             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11627 	    break;
11628         default:
11629             ret = do_fcntl(arg1, arg2, arg3);
11630             break;
11631         }
11632 	break;
11633     }
11634 #endif
11635 #ifdef TARGET_NR_cacheflush
11636     case TARGET_NR_cacheflush:
11637         /* self-modifying code is handled automatically, so nothing needed */
11638         ret = 0;
11639         break;
11640 #endif
11641 #ifdef TARGET_NR_security
11642     case TARGET_NR_security:
11643         goto unimplemented;
11644 #endif
11645 #ifdef TARGET_NR_getpagesize
11646     case TARGET_NR_getpagesize:
11647         ret = TARGET_PAGE_SIZE;
11648         break;
11649 #endif
11650     case TARGET_NR_gettid:
11651         ret = get_errno(gettid());
11652         break;
11653 #ifdef TARGET_NR_readahead
11654     case TARGET_NR_readahead:
11655 #if TARGET_ABI_BITS == 32
11656         if (regpairs_aligned(cpu_env, num)) {
11657             arg2 = arg3;
11658             arg3 = arg4;
11659             arg4 = arg5;
11660         }
11661         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11662 #else
11663         ret = get_errno(readahead(arg1, arg2, arg3));
11664 #endif
11665         break;
11666 #endif
11667 #ifdef CONFIG_ATTR
11668 #ifdef TARGET_NR_setxattr
11669     case TARGET_NR_listxattr:
11670     case TARGET_NR_llistxattr:
11671     {
11672         void *p, *b = 0;
11673         if (arg2) {
11674             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11675             if (!b) {
11676                 ret = -TARGET_EFAULT;
11677                 break;
11678             }
11679         }
11680         p = lock_user_string(arg1);
11681         if (p) {
11682             if (num == TARGET_NR_listxattr) {
11683                 ret = get_errno(listxattr(p, b, arg3));
11684             } else {
11685                 ret = get_errno(llistxattr(p, b, arg3));
11686             }
11687         } else {
11688             ret = -TARGET_EFAULT;
11689         }
11690         unlock_user(p, arg1, 0);
11691         unlock_user(b, arg2, arg3);
11692         break;
11693     }
11694     case TARGET_NR_flistxattr:
11695     {
11696         void *b = 0;
11697         if (arg2) {
11698             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11699             if (!b) {
11700                 ret = -TARGET_EFAULT;
11701                 break;
11702             }
11703         }
11704         ret = get_errno(flistxattr(arg1, b, arg3));
11705         unlock_user(b, arg2, arg3);
11706         break;
11707     }
11708     case TARGET_NR_setxattr:
11709     case TARGET_NR_lsetxattr:
11710         {
11711             void *p, *n, *v = 0;
11712             if (arg3) {
11713                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11714                 if (!v) {
11715                     ret = -TARGET_EFAULT;
11716                     break;
11717                 }
11718             }
11719             p = lock_user_string(arg1);
11720             n = lock_user_string(arg2);
11721             if (p && n) {
11722                 if (num == TARGET_NR_setxattr) {
11723                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11724                 } else {
11725                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11726                 }
11727             } else {
11728                 ret = -TARGET_EFAULT;
11729             }
11730             unlock_user(p, arg1, 0);
11731             unlock_user(n, arg2, 0);
11732             unlock_user(v, arg3, 0);
11733         }
11734         break;
11735     case TARGET_NR_fsetxattr:
11736         {
11737             void *n, *v = 0;
11738             if (arg3) {
11739                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11740                 if (!v) {
11741                     ret = -TARGET_EFAULT;
11742                     break;
11743                 }
11744             }
11745             n = lock_user_string(arg2);
11746             if (n) {
11747                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11748             } else {
11749                 ret = -TARGET_EFAULT;
11750             }
11751             unlock_user(n, arg2, 0);
11752             unlock_user(v, arg3, 0);
11753         }
11754         break;
11755     case TARGET_NR_getxattr:
11756     case TARGET_NR_lgetxattr:
11757         {
11758             void *p, *n, *v = 0;
11759             if (arg3) {
11760                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11761                 if (!v) {
11762                     ret = -TARGET_EFAULT;
11763                     break;
11764                 }
11765             }
11766             p = lock_user_string(arg1);
11767             n = lock_user_string(arg2);
11768             if (p && n) {
11769                 if (num == TARGET_NR_getxattr) {
11770                     ret = get_errno(getxattr(p, n, v, arg4));
11771                 } else {
11772                     ret = get_errno(lgetxattr(p, n, v, arg4));
11773                 }
11774             } else {
11775                 ret = -TARGET_EFAULT;
11776             }
11777             unlock_user(p, arg1, 0);
11778             unlock_user(n, arg2, 0);
11779             unlock_user(v, arg3, arg4);
11780         }
11781         break;
11782     case TARGET_NR_fgetxattr:
11783         {
11784             void *n, *v = 0;
11785             if (arg3) {
11786                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11787                 if (!v) {
11788                     ret = -TARGET_EFAULT;
11789                     break;
11790                 }
11791             }
11792             n = lock_user_string(arg2);
11793             if (n) {
11794                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11795             } else {
11796                 ret = -TARGET_EFAULT;
11797             }
11798             unlock_user(n, arg2, 0);
11799             unlock_user(v, arg3, arg4);
11800         }
11801         break;
11802     case TARGET_NR_removexattr:
11803     case TARGET_NR_lremovexattr:
11804         {
11805             void *p, *n;
11806             p = lock_user_string(arg1);
11807             n = lock_user_string(arg2);
11808             if (p && n) {
11809                 if (num == TARGET_NR_removexattr) {
11810                     ret = get_errno(removexattr(p, n));
11811                 } else {
11812                     ret = get_errno(lremovexattr(p, n));
11813                 }
11814             } else {
11815                 ret = -TARGET_EFAULT;
11816             }
11817             unlock_user(p, arg1, 0);
11818             unlock_user(n, arg2, 0);
11819         }
11820         break;
11821     case TARGET_NR_fremovexattr:
11822         {
11823             void *n;
11824             n = lock_user_string(arg2);
11825             if (n) {
11826                 ret = get_errno(fremovexattr(arg1, n));
11827             } else {
11828                 ret = -TARGET_EFAULT;
11829             }
11830             unlock_user(n, arg2, 0);
11831         }
11832         break;
11833 #endif
11834 #endif /* CONFIG_ATTR */
11835 #ifdef TARGET_NR_set_thread_area
11836     case TARGET_NR_set_thread_area:
11837 #if defined(TARGET_MIPS)
11838       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11839       ret = 0;
11840       break;
11841 #elif defined(TARGET_CRIS)
11842       if (arg1 & 0xff)
11843           ret = -TARGET_EINVAL;
11844       else {
11845           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11846           ret = 0;
11847       }
11848       break;
11849 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11850       ret = do_set_thread_area(cpu_env, arg1);
11851       break;
11852 #elif defined(TARGET_M68K)
11853       {
11854           TaskState *ts = cpu->opaque;
11855           ts->tp_value = arg1;
11856           ret = 0;
11857           break;
11858       }
11859 #else
11860       goto unimplemented_nowarn;
11861 #endif
11862 #endif
11863 #ifdef TARGET_NR_get_thread_area
11864     case TARGET_NR_get_thread_area:
11865 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11866         ret = do_get_thread_area(cpu_env, arg1);
11867         break;
11868 #elif defined(TARGET_M68K)
11869         {
11870             TaskState *ts = cpu->opaque;
11871             ret = ts->tp_value;
11872             break;
11873         }
11874 #else
11875         goto unimplemented_nowarn;
11876 #endif
11877 #endif
11878 #ifdef TARGET_NR_getdomainname
11879     case TARGET_NR_getdomainname:
11880         goto unimplemented_nowarn;
11881 #endif
11882 
11883 #ifdef TARGET_NR_clock_gettime
11884     case TARGET_NR_clock_gettime:
11885     {
11886         struct timespec ts;
11887         ret = get_errno(clock_gettime(arg1, &ts));
11888         if (!is_error(ret)) {
11889             host_to_target_timespec(arg2, &ts);
11890         }
11891         break;
11892     }
11893 #endif
11894 #ifdef TARGET_NR_clock_getres
11895     case TARGET_NR_clock_getres:
11896     {
11897         struct timespec ts;
11898         ret = get_errno(clock_getres(arg1, &ts));
11899         if (!is_error(ret)) {
11900             host_to_target_timespec(arg2, &ts);
11901         }
11902         break;
11903     }
11904 #endif
11905 #ifdef TARGET_NR_clock_nanosleep
11906     case TARGET_NR_clock_nanosleep:
11907     {
11908         struct timespec ts;
11909         target_to_host_timespec(&ts, arg3);
11910         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11911                                              &ts, arg4 ? &ts : NULL));
11912         if (arg4)
11913             host_to_target_timespec(arg4, &ts);
11914 
11915 #if defined(TARGET_PPC)
11916         /* clock_nanosleep is odd in that it returns positive errno values.
11917          * On PPC, CR0 bit 3 should be set in such a situation. */
11918         if (ret && ret != -TARGET_ERESTARTSYS) {
11919             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11920         }
11921 #endif
11922         break;
11923     }
11924 #endif
11925 
11926 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11927     case TARGET_NR_set_tid_address:
11928         ret = get_errno(set_tid_address((int *)g2h(arg1)));
11929         break;
11930 #endif
11931 
11932     case TARGET_NR_tkill:
11933         ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11934         break;
11935 
11936     case TARGET_NR_tgkill:
11937         ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11938                         target_to_host_signal(arg3)));
11939         break;
11940 
11941 #ifdef TARGET_NR_set_robust_list
11942     case TARGET_NR_set_robust_list:
11943     case TARGET_NR_get_robust_list:
11944         /* The ABI for supporting robust futexes has userspace pass
11945          * the kernel a pointer to a linked list which is updated by
11946          * userspace after the syscall; the list is walked by the kernel
11947          * when the thread exits. Since the linked list in QEMU guest
11948          * memory isn't a valid linked list for the host and we have
11949          * no way to reliably intercept the thread-death event, we can't
11950          * support these. Silently return ENOSYS so that guest userspace
11951          * falls back to a non-robust futex implementation (which should
11952          * be OK except in the corner case of the guest crashing while
11953          * holding a mutex that is shared with another process via
11954          * shared memory).
11955          */
11956         goto unimplemented_nowarn;
11957 #endif
11958 
11959 #if defined(TARGET_NR_utimensat)
11960     case TARGET_NR_utimensat:
11961         {
11962             struct timespec *tsp, ts[2];
11963             if (!arg3) {
11964                 tsp = NULL;
11965             } else {
11966                 target_to_host_timespec(ts, arg3);
11967                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11968                 tsp = ts;
11969             }
11970             if (!arg2)
11971                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11972             else {
11973                 if (!(p = lock_user_string(arg2))) {
11974                     ret = -TARGET_EFAULT;
11975                     goto fail;
11976                 }
11977                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11978                 unlock_user(p, arg2, 0);
11979             }
11980         }
11981 	break;
11982 #endif
11983     case TARGET_NR_futex:
11984         ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11985         break;
11986 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11987     case TARGET_NR_inotify_init:
11988         ret = get_errno(sys_inotify_init());
11989         if (ret >= 0) {
11990             fd_trans_register(ret, &target_inotify_trans);
11991         }
11992         break;
11993 #endif
11994 #ifdef CONFIG_INOTIFY1
11995 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11996     case TARGET_NR_inotify_init1:
11997         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11998                                           fcntl_flags_tbl)));
11999         if (ret >= 0) {
12000             fd_trans_register(ret, &target_inotify_trans);
12001         }
12002         break;
12003 #endif
12004 #endif
12005 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12006     case TARGET_NR_inotify_add_watch:
12007         p = lock_user_string(arg2);
12008         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12009         unlock_user(p, arg2, 0);
12010         break;
12011 #endif
12012 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12013     case TARGET_NR_inotify_rm_watch:
12014         ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12015         break;
12016 #endif
12017 
12018 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12019     case TARGET_NR_mq_open:
12020         {
12021             struct mq_attr posix_mq_attr;
12022             struct mq_attr *pposix_mq_attr;
12023             int host_flags;
12024 
12025             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12026             pposix_mq_attr = NULL;
12027             if (arg4) {
12028                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12029                     goto efault;
12030                 }
12031                 pposix_mq_attr = &posix_mq_attr;
12032             }
12033             p = lock_user_string(arg1 - 1);
12034             if (!p) {
12035                 goto efault;
12036             }
12037             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12038             unlock_user (p, arg1, 0);
12039         }
12040         break;
12041 
12042     case TARGET_NR_mq_unlink:
12043         p = lock_user_string(arg1 - 1);
12044         if (!p) {
12045             ret = -TARGET_EFAULT;
12046             break;
12047         }
12048         ret = get_errno(mq_unlink(p));
12049         unlock_user (p, arg1, 0);
12050         break;
12051 
12052     case TARGET_NR_mq_timedsend:
12053         {
12054             struct timespec ts;
12055 
12056             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12057             if (arg5 != 0) {
12058                 target_to_host_timespec(&ts, arg5);
12059                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12060                 host_to_target_timespec(arg5, &ts);
12061             } else {
12062                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12063             }
12064             unlock_user (p, arg2, arg3);
12065         }
12066         break;
12067 
12068     case TARGET_NR_mq_timedreceive:
12069         {
12070             struct timespec ts;
12071             unsigned int prio;
12072 
12073             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12074             if (arg5 != 0) {
12075                 target_to_host_timespec(&ts, arg5);
12076                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12077                                                      &prio, &ts));
12078                 host_to_target_timespec(arg5, &ts);
12079             } else {
12080                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12081                                                      &prio, NULL));
12082             }
12083             unlock_user (p, arg2, arg3);
12084             if (arg4 != 0)
12085                 put_user_u32(prio, arg4);
12086         }
12087         break;
12088 
12089     /* Not implemented for now... */
12090 /*     case TARGET_NR_mq_notify: */
12091 /*         break; */
12092 
12093     case TARGET_NR_mq_getsetattr:
12094         {
12095             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12096             ret = 0;
12097             if (arg3 != 0) {
12098                 ret = mq_getattr(arg1, &posix_mq_attr_out);
12099                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12100             }
12101             if (arg2 != 0) {
12102                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12103                 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
12104             }
12105 
12106         }
12107         break;
12108 #endif
12109 
12110 #ifdef CONFIG_SPLICE
12111 #ifdef TARGET_NR_tee
12112     case TARGET_NR_tee:
12113         {
12114             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12115         }
12116         break;
12117 #endif
12118 #ifdef TARGET_NR_splice
12119     case TARGET_NR_splice:
12120         {
12121             loff_t loff_in, loff_out;
12122             loff_t *ploff_in = NULL, *ploff_out = NULL;
12123             if (arg2) {
12124                 if (get_user_u64(loff_in, arg2)) {
12125                     goto efault;
12126                 }
12127                 ploff_in = &loff_in;
12128             }
12129             if (arg4) {
12130                 if (get_user_u64(loff_out, arg4)) {
12131                     goto efault;
12132                 }
12133                 ploff_out = &loff_out;
12134             }
12135             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12136             if (arg2) {
12137                 if (put_user_u64(loff_in, arg2)) {
12138                     goto efault;
12139                 }
12140             }
12141             if (arg4) {
12142                 if (put_user_u64(loff_out, arg4)) {
12143                     goto efault;
12144                 }
12145             }
12146         }
12147         break;
12148 #endif
12149 #ifdef TARGET_NR_vmsplice
12150 	case TARGET_NR_vmsplice:
12151         {
12152             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12153             if (vec != NULL) {
12154                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12155                 unlock_iovec(vec, arg2, arg3, 0);
12156             } else {
12157                 ret = -host_to_target_errno(errno);
12158             }
12159         }
12160         break;
12161 #endif
12162 #endif /* CONFIG_SPLICE */
12163 #ifdef CONFIG_EVENTFD
12164 #if defined(TARGET_NR_eventfd)
12165     case TARGET_NR_eventfd:
12166         ret = get_errno(eventfd(arg1, 0));
12167         if (ret >= 0) {
12168             fd_trans_register(ret, &target_eventfd_trans);
12169         }
12170         break;
12171 #endif
12172 #if defined(TARGET_NR_eventfd2)
12173     case TARGET_NR_eventfd2:
12174     {
12175         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12176         if (arg2 & TARGET_O_NONBLOCK) {
12177             host_flags |= O_NONBLOCK;
12178         }
12179         if (arg2 & TARGET_O_CLOEXEC) {
12180             host_flags |= O_CLOEXEC;
12181         }
12182         ret = get_errno(eventfd(arg1, host_flags));
12183         if (ret >= 0) {
12184             fd_trans_register(ret, &target_eventfd_trans);
12185         }
12186         break;
12187     }
12188 #endif
12189 #endif /* CONFIG_EVENTFD  */
12190 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12191     case TARGET_NR_fallocate:
12192 #if TARGET_ABI_BITS == 32
12193         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12194                                   target_offset64(arg5, arg6)));
12195 #else
12196         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12197 #endif
12198         break;
12199 #endif
12200 #if defined(CONFIG_SYNC_FILE_RANGE)
12201 #if defined(TARGET_NR_sync_file_range)
12202     case TARGET_NR_sync_file_range:
12203 #if TARGET_ABI_BITS == 32
12204 #if defined(TARGET_MIPS)
12205         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12206                                         target_offset64(arg5, arg6), arg7));
12207 #else
12208         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12209                                         target_offset64(arg4, arg5), arg6));
12210 #endif /* !TARGET_MIPS */
12211 #else
12212         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12213 #endif
12214         break;
12215 #endif
12216 #if defined(TARGET_NR_sync_file_range2)
12217     case TARGET_NR_sync_file_range2:
12218         /* This is like sync_file_range but the arguments are reordered */
12219 #if TARGET_ABI_BITS == 32
12220         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12221                                         target_offset64(arg5, arg6), arg2));
12222 #else
12223         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12224 #endif
12225         break;
12226 #endif
12227 #endif
12228 #if defined(TARGET_NR_signalfd4)
12229     case TARGET_NR_signalfd4:
12230         ret = do_signalfd4(arg1, arg2, arg4);
12231         break;
12232 #endif
12233 #if defined(TARGET_NR_signalfd)
12234     case TARGET_NR_signalfd:
12235         ret = do_signalfd4(arg1, arg2, 0);
12236         break;
12237 #endif
12238 #if defined(CONFIG_EPOLL)
12239 #if defined(TARGET_NR_epoll_create)
12240     case TARGET_NR_epoll_create:
12241         ret = get_errno(epoll_create(arg1));
12242         break;
12243 #endif
12244 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12245     case TARGET_NR_epoll_create1:
12246         ret = get_errno(epoll_create1(arg1));
12247         break;
12248 #endif
12249 #if defined(TARGET_NR_epoll_ctl)
12250     case TARGET_NR_epoll_ctl:
12251     {
12252         struct epoll_event ep;
12253         struct epoll_event *epp = 0;
12254         if (arg4) {
12255             struct target_epoll_event *target_ep;
12256             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12257                 goto efault;
12258             }
12259             ep.events = tswap32(target_ep->events);
12260             /* The epoll_data_t union is just opaque data to the kernel,
12261              * so we transfer all 64 bits across and need not worry what
12262              * actual data type it is.
12263              */
12264             ep.data.u64 = tswap64(target_ep->data.u64);
12265             unlock_user_struct(target_ep, arg4, 0);
12266             epp = &ep;
12267         }
12268         ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12269         break;
12270     }
12271 #endif
12272 
12273 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12274 #if defined(TARGET_NR_epoll_wait)
12275     case TARGET_NR_epoll_wait:
12276 #endif
12277 #if defined(TARGET_NR_epoll_pwait)
12278     case TARGET_NR_epoll_pwait:
12279 #endif
12280     {
12281         struct target_epoll_event *target_ep;
12282         struct epoll_event *ep;
12283         int epfd = arg1;
12284         int maxevents = arg3;
12285         int timeout = arg4;
12286 
12287         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12288             ret = -TARGET_EINVAL;
12289             break;
12290         }
12291 
12292         target_ep = lock_user(VERIFY_WRITE, arg2,
12293                               maxevents * sizeof(struct target_epoll_event), 1);
12294         if (!target_ep) {
12295             goto efault;
12296         }
12297 
12298         ep = g_try_new(struct epoll_event, maxevents);
12299         if (!ep) {
12300             unlock_user(target_ep, arg2, 0);
12301             ret = -TARGET_ENOMEM;
12302             break;
12303         }
12304 
12305         switch (num) {
12306 #if defined(TARGET_NR_epoll_pwait)
12307         case TARGET_NR_epoll_pwait:
12308         {
12309             target_sigset_t *target_set;
12310             sigset_t _set, *set = &_set;
12311 
12312             if (arg5) {
12313                 if (arg6 != sizeof(target_sigset_t)) {
12314                     ret = -TARGET_EINVAL;
12315                     break;
12316                 }
12317 
12318                 target_set = lock_user(VERIFY_READ, arg5,
12319                                        sizeof(target_sigset_t), 1);
12320                 if (!target_set) {
12321                     ret = -TARGET_EFAULT;
12322                     break;
12323                 }
12324                 target_to_host_sigset(set, target_set);
12325                 unlock_user(target_set, arg5, 0);
12326             } else {
12327                 set = NULL;
12328             }
12329 
12330             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12331                                              set, SIGSET_T_SIZE));
12332             break;
12333         }
12334 #endif
12335 #if defined(TARGET_NR_epoll_wait)
12336         case TARGET_NR_epoll_wait:
12337             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12338                                              NULL, 0));
12339             break;
12340 #endif
12341         default:
12342             ret = -TARGET_ENOSYS;
12343         }
12344         if (!is_error(ret)) {
12345             int i;
12346             for (i = 0; i < ret; i++) {
12347                 target_ep[i].events = tswap32(ep[i].events);
12348                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12349             }
12350             unlock_user(target_ep, arg2,
12351                         ret * sizeof(struct target_epoll_event));
12352         } else {
12353             unlock_user(target_ep, arg2, 0);
12354         }
12355         g_free(ep);
12356         break;
12357     }
12358 #endif
12359 #endif
12360 #ifdef TARGET_NR_prlimit64
12361     case TARGET_NR_prlimit64:
12362     {
12363         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12364         struct target_rlimit64 *target_rnew, *target_rold;
12365         struct host_rlimit64 rnew, rold, *rnewp = 0;
12366         int resource = target_to_host_resource(arg2);
12367         if (arg3) {
12368             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12369                 goto efault;
12370             }
12371             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12372             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12373             unlock_user_struct(target_rnew, arg3, 0);
12374             rnewp = &rnew;
12375         }
12376 
12377         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12378         if (!is_error(ret) && arg4) {
12379             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12380                 goto efault;
12381             }
12382             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12383             target_rold->rlim_max = tswap64(rold.rlim_max);
12384             unlock_user_struct(target_rold, arg4, 1);
12385         }
12386         break;
12387     }
12388 #endif
12389 #ifdef TARGET_NR_gethostname
12390     case TARGET_NR_gethostname:
12391     {
12392         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12393         if (name) {
12394             ret = get_errno(gethostname(name, arg2));
12395             unlock_user(name, arg1, arg2);
12396         } else {
12397             ret = -TARGET_EFAULT;
12398         }
12399         break;
12400     }
12401 #endif
12402 #ifdef TARGET_NR_atomic_cmpxchg_32
12403     case TARGET_NR_atomic_cmpxchg_32:
12404     {
12405         /* should use start_exclusive from main.c */
12406         abi_ulong mem_value;
12407         if (get_user_u32(mem_value, arg6)) {
12408             target_siginfo_t info;
12409             info.si_signo = SIGSEGV;
12410             info.si_errno = 0;
12411             info.si_code = TARGET_SEGV_MAPERR;
12412             info._sifields._sigfault._addr = arg6;
12413             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12414                          QEMU_SI_FAULT, &info);
12415             ret = 0xdeadbeef;
12416 
12417         }
12418         if (mem_value == arg2)
12419             put_user_u32(arg1, arg6);
12420         ret = mem_value;
12421         break;
12422     }
12423 #endif
12424 #ifdef TARGET_NR_atomic_barrier
12425     case TARGET_NR_atomic_barrier:
12426     {
12427         /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12428         ret = 0;
12429         break;
12430     }
12431 #endif
12432 
12433 #ifdef TARGET_NR_timer_create
12434     case TARGET_NR_timer_create:
12435     {
12436         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12437 
12438         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12439 
12440         int clkid = arg1;
12441         int timer_index = next_free_host_timer();
12442 
12443         if (timer_index < 0) {
12444             ret = -TARGET_EAGAIN;
12445         } else {
12446             timer_t *phtimer = g_posix_timers  + timer_index;
12447 
12448             if (arg2) {
12449                 phost_sevp = &host_sevp;
12450                 ret = target_to_host_sigevent(phost_sevp, arg2);
12451                 if (ret != 0) {
12452                     break;
12453                 }
12454             }
12455 
12456             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12457             if (ret) {
12458                 phtimer = NULL;
12459             } else {
12460                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12461                     goto efault;
12462                 }
12463             }
12464         }
12465         break;
12466     }
12467 #endif
12468 
12469 #ifdef TARGET_NR_timer_settime
12470     case TARGET_NR_timer_settime:
12471     {
12472         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12473          * struct itimerspec * old_value */
12474         target_timer_t timerid = get_timer_id(arg1);
12475 
12476         if (timerid < 0) {
12477             ret = timerid;
12478         } else if (arg3 == 0) {
12479             ret = -TARGET_EINVAL;
12480         } else {
12481             timer_t htimer = g_posix_timers[timerid];
12482             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12483 
12484             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12485                 goto efault;
12486             }
12487             ret = get_errno(
12488                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12489             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12490                 goto efault;
12491             }
12492         }
12493         break;
12494     }
12495 #endif
12496 
12497 #ifdef TARGET_NR_timer_gettime
12498     case TARGET_NR_timer_gettime:
12499     {
12500         /* args: timer_t timerid, struct itimerspec *curr_value */
12501         target_timer_t timerid = get_timer_id(arg1);
12502 
12503         if (timerid < 0) {
12504             ret = timerid;
12505         } else if (!arg2) {
12506             ret = -TARGET_EFAULT;
12507         } else {
12508             timer_t htimer = g_posix_timers[timerid];
12509             struct itimerspec hspec;
12510             ret = get_errno(timer_gettime(htimer, &hspec));
12511 
12512             if (host_to_target_itimerspec(arg2, &hspec)) {
12513                 ret = -TARGET_EFAULT;
12514             }
12515         }
12516         break;
12517     }
12518 #endif
12519 
12520 #ifdef TARGET_NR_timer_getoverrun
12521     case TARGET_NR_timer_getoverrun:
12522     {
12523         /* args: timer_t timerid */
12524         target_timer_t timerid = get_timer_id(arg1);
12525 
12526         if (timerid < 0) {
12527             ret = timerid;
12528         } else {
12529             timer_t htimer = g_posix_timers[timerid];
12530             ret = get_errno(timer_getoverrun(htimer));
12531         }
12532         fd_trans_unregister(ret);
12533         break;
12534     }
12535 #endif
12536 
12537 #ifdef TARGET_NR_timer_delete
12538     case TARGET_NR_timer_delete:
12539     {
12540         /* args: timer_t timerid */
12541         target_timer_t timerid = get_timer_id(arg1);
12542 
12543         if (timerid < 0) {
12544             ret = timerid;
12545         } else {
12546             timer_t htimer = g_posix_timers[timerid];
12547             ret = get_errno(timer_delete(htimer));
12548             g_posix_timers[timerid] = 0;
12549         }
12550         break;
12551     }
12552 #endif
12553 
12554 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12555     case TARGET_NR_timerfd_create:
12556         ret = get_errno(timerfd_create(arg1,
12557                 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12558         break;
12559 #endif
12560 
12561 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12562     case TARGET_NR_timerfd_gettime:
12563         {
12564             struct itimerspec its_curr;
12565 
12566             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12567 
12568             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12569                 goto efault;
12570             }
12571         }
12572         break;
12573 #endif
12574 
12575 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12576     case TARGET_NR_timerfd_settime:
12577         {
12578             struct itimerspec its_new, its_old, *p_new;
12579 
12580             if (arg3) {
12581                 if (target_to_host_itimerspec(&its_new, arg3)) {
12582                     goto efault;
12583                 }
12584                 p_new = &its_new;
12585             } else {
12586                 p_new = NULL;
12587             }
12588 
12589             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12590 
12591             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12592                 goto efault;
12593             }
12594         }
12595         break;
12596 #endif
12597 
12598 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12599     case TARGET_NR_ioprio_get:
12600         ret = get_errno(ioprio_get(arg1, arg2));
12601         break;
12602 #endif
12603 
12604 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12605     case TARGET_NR_ioprio_set:
12606         ret = get_errno(ioprio_set(arg1, arg2, arg3));
12607         break;
12608 #endif
12609 
12610 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12611     case TARGET_NR_setns:
12612         ret = get_errno(setns(arg1, arg2));
12613         break;
12614 #endif
12615 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12616     case TARGET_NR_unshare:
12617         ret = get_errno(unshare(arg1));
12618         break;
12619 #endif
12620 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12621     case TARGET_NR_kcmp:
12622         ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12623         break;
12624 #endif
12625 
12626     default:
12627     unimplemented:
12628         gemu_log("qemu: Unsupported syscall: %d\n", num);
12629 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12630     unimplemented_nowarn:
12631 #endif
12632         ret = -TARGET_ENOSYS;
12633         break;
12634     }
12635 fail:
12636 #ifdef DEBUG
12637     gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12638 #endif
12639     if(do_strace)
12640         print_syscall_ret(num, ret);
12641     trace_guest_user_syscall_ret(cpu, num, ret);
12642     return ret;
12643 efault:
12644     ret = -TARGET_EFAULT;
12645     goto fail;
12646 }
12647