xref: /openbmc/qemu/linux-user/syscall.c (revision 43a47399)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special-cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
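
/* Illustrative only (the exact flag set is up to the guest libc): a typical
 * glibc pthread_create() issues clone() with roughly
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS and no bit from
 * CLONE_INVALID_THREAD_FLAGS, so it is classified as thread creation.
 * A plain fork()-style clone() carries only an exit signal in CSIGNAL
 * (e.g. SIGCHLD), matching the fork classification instead.
 */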
165 
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168  * once. This exercises the codepaths for restart.
169  */
170 //#define DEBUG_ERESTARTSYS
171 
172 //#include <linux/msdos_fs.h>
173 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
174 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
175 
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
183 
184 #define _syscall0(type,name)		\
185 static type name (void)			\
186 {					\
187 	return syscall(__NR_##name);	\
188 }
189 
190 #define _syscall1(type,name,type1,arg1)		\
191 static type name (type1 arg1)			\
192 {						\
193 	return syscall(__NR_##name, arg1);	\
194 }
195 
196 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
197 static type name (type1 arg1,type2 arg2)		\
198 {							\
199 	return syscall(__NR_##name, arg1, arg2);	\
200 }
201 
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
203 static type name (type1 arg1,type2 arg2,type3 arg3)		\
204 {								\
205 	return syscall(__NR_##name, arg1, arg2, arg3);		\
206 }
207 
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
210 {										\
211 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
212 }
213 
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
215 		  type5,arg5)							\
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
217 {										\
218 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
219 }
220 
221 
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
223 		  type5,arg5,type6,arg6)					\
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
225                   type6 arg6)							\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
228 }
229 
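/* For illustration only (not part of the build): with the macros above,
 *
 *   _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *             uint, count)
 *
 * expands to
 *
 *   static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *   {
 *       return syscall(__NR_sys_getdents, fd, dirp, count);
 *   }
 *
 * i.e. a thin wrapper that issues the raw host syscall directly, so we are
 * not at the mercy of whatever translation the host libc wrapper might do.
 */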
230 
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
247 
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
252 
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257    errno. */
258 static int gettid(void) {
259     return -ENOSYS;
260 }
261 #endif
262 
263 /* For the 64-bit guest on 32-bit host case we must emulate
264  * getdents using getdents64, because otherwise the host
265  * might hand us back more dirent records than we can fit
266  * into the guest buffer after structure format conversion.
267  * Otherwise we emulate the guest getdents with the host getdents, if the host has it.
268  */
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
271 #endif
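
/* Example of the rule above: for a 64-bit guest on a 32-bit host,
 * HOST_LONG_BITS (32) < TARGET_ABI_BITS (64), so
 * EMULATE_GETDENTS_WITH_GETDENTS stays undefined and the guest getdents is
 * implemented via the host getdents64 declared below, which keeps the
 * converted records from overflowing the guest's buffer.
 */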
272 
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
275 #endif
276 #if (defined(TARGET_NR_getdents) && \
277       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
280 #endif
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
283           loff_t *, res, uint, wh);
284 #endif
285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
287           siginfo_t *, uinfo)
288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group,int,error_code)
291 #endif
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address,int *,tidptr)
294 #endif
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
297           const struct timespec *,timeout,int *,uaddr2,int,val3)
298 #endif
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
301           unsigned long *, user_mask_ptr);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
304           unsigned long *, user_mask_ptr);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
308           void *, arg);
309 _syscall2(int, capget, struct __user_cap_header_struct *, header,
310           struct __user_cap_data_struct *, data);
311 _syscall2(int, capset, struct __user_cap_header_struct *, header,
312           struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get, int, which, int, who)
315 #endif
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
318 #endif
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
321 #endif
322 
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
325           unsigned long, idx1, unsigned long, idx2)
326 #endif
327 
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
330   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
331   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
332   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
333   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
334   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
335   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
336   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
337   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
338   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
339   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
340   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
342 #if defined(O_DIRECT)
343   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
344 #endif
345 #if defined(O_NOATIME)
346   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
347 #endif
348 #if defined(O_CLOEXEC)
349   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
350 #endif
351 #if defined(O_PATH)
352   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
353 #endif
354 #if defined(O_TMPFILE)
355   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
356 #endif
357   /* Don't terminate the list prematurely on 64-bit host+guest.  */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361   { 0, 0, 0, 0 }
362 };
363 
364 enum {
365     QEMU_IFLA_BR_UNSPEC,
366     QEMU_IFLA_BR_FORWARD_DELAY,
367     QEMU_IFLA_BR_HELLO_TIME,
368     QEMU_IFLA_BR_MAX_AGE,
369     QEMU_IFLA_BR_AGEING_TIME,
370     QEMU_IFLA_BR_STP_STATE,
371     QEMU_IFLA_BR_PRIORITY,
372     QEMU_IFLA_BR_VLAN_FILTERING,
373     QEMU_IFLA_BR_VLAN_PROTOCOL,
374     QEMU_IFLA_BR_GROUP_FWD_MASK,
375     QEMU_IFLA_BR_ROOT_ID,
376     QEMU_IFLA_BR_BRIDGE_ID,
377     QEMU_IFLA_BR_ROOT_PORT,
378     QEMU_IFLA_BR_ROOT_PATH_COST,
379     QEMU_IFLA_BR_TOPOLOGY_CHANGE,
380     QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
381     QEMU_IFLA_BR_HELLO_TIMER,
382     QEMU_IFLA_BR_TCN_TIMER,
383     QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
384     QEMU_IFLA_BR_GC_TIMER,
385     QEMU_IFLA_BR_GROUP_ADDR,
386     QEMU_IFLA_BR_FDB_FLUSH,
387     QEMU_IFLA_BR_MCAST_ROUTER,
388     QEMU_IFLA_BR_MCAST_SNOOPING,
389     QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
390     QEMU_IFLA_BR_MCAST_QUERIER,
391     QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
392     QEMU_IFLA_BR_MCAST_HASH_MAX,
393     QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
394     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
395     QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
396     QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
397     QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
398     QEMU_IFLA_BR_MCAST_QUERY_INTVL,
399     QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
400     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
401     QEMU_IFLA_BR_NF_CALL_IPTABLES,
402     QEMU_IFLA_BR_NF_CALL_IP6TABLES,
403     QEMU_IFLA_BR_NF_CALL_ARPTABLES,
404     QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
405     QEMU_IFLA_BR_PAD,
406     QEMU_IFLA_BR_VLAN_STATS_ENABLED,
407     QEMU_IFLA_BR_MCAST_STATS_ENABLED,
408     QEMU_IFLA_BR_MCAST_IGMP_VERSION,
409     QEMU_IFLA_BR_MCAST_MLD_VERSION,
410     QEMU___IFLA_BR_MAX,
411 };
412 
413 enum {
414     QEMU_IFLA_UNSPEC,
415     QEMU_IFLA_ADDRESS,
416     QEMU_IFLA_BROADCAST,
417     QEMU_IFLA_IFNAME,
418     QEMU_IFLA_MTU,
419     QEMU_IFLA_LINK,
420     QEMU_IFLA_QDISC,
421     QEMU_IFLA_STATS,
422     QEMU_IFLA_COST,
423     QEMU_IFLA_PRIORITY,
424     QEMU_IFLA_MASTER,
425     QEMU_IFLA_WIRELESS,
426     QEMU_IFLA_PROTINFO,
427     QEMU_IFLA_TXQLEN,
428     QEMU_IFLA_MAP,
429     QEMU_IFLA_WEIGHT,
430     QEMU_IFLA_OPERSTATE,
431     QEMU_IFLA_LINKMODE,
432     QEMU_IFLA_LINKINFO,
433     QEMU_IFLA_NET_NS_PID,
434     QEMU_IFLA_IFALIAS,
435     QEMU_IFLA_NUM_VF,
436     QEMU_IFLA_VFINFO_LIST,
437     QEMU_IFLA_STATS64,
438     QEMU_IFLA_VF_PORTS,
439     QEMU_IFLA_PORT_SELF,
440     QEMU_IFLA_AF_SPEC,
441     QEMU_IFLA_GROUP,
442     QEMU_IFLA_NET_NS_FD,
443     QEMU_IFLA_EXT_MASK,
444     QEMU_IFLA_PROMISCUITY,
445     QEMU_IFLA_NUM_TX_QUEUES,
446     QEMU_IFLA_NUM_RX_QUEUES,
447     QEMU_IFLA_CARRIER,
448     QEMU_IFLA_PHYS_PORT_ID,
449     QEMU_IFLA_CARRIER_CHANGES,
450     QEMU_IFLA_PHYS_SWITCH_ID,
451     QEMU_IFLA_LINK_NETNSID,
452     QEMU_IFLA_PHYS_PORT_NAME,
453     QEMU_IFLA_PROTO_DOWN,
454     QEMU_IFLA_GSO_MAX_SEGS,
455     QEMU_IFLA_GSO_MAX_SIZE,
456     QEMU_IFLA_PAD,
457     QEMU_IFLA_XDP,
458     QEMU_IFLA_EVENT,
459     QEMU_IFLA_NEW_NETNSID,
460     QEMU_IFLA_IF_NETNSID,
461     QEMU_IFLA_CARRIER_UP_COUNT,
462     QEMU_IFLA_CARRIER_DOWN_COUNT,
463     QEMU_IFLA_NEW_IFINDEX,
464     QEMU___IFLA_MAX
465 };
466 
467 enum {
468     QEMU_IFLA_BRPORT_UNSPEC,
469     QEMU_IFLA_BRPORT_STATE,
470     QEMU_IFLA_BRPORT_PRIORITY,
471     QEMU_IFLA_BRPORT_COST,
472     QEMU_IFLA_BRPORT_MODE,
473     QEMU_IFLA_BRPORT_GUARD,
474     QEMU_IFLA_BRPORT_PROTECT,
475     QEMU_IFLA_BRPORT_FAST_LEAVE,
476     QEMU_IFLA_BRPORT_LEARNING,
477     QEMU_IFLA_BRPORT_UNICAST_FLOOD,
478     QEMU_IFLA_BRPORT_PROXYARP,
479     QEMU_IFLA_BRPORT_LEARNING_SYNC,
480     QEMU_IFLA_BRPORT_PROXYARP_WIFI,
481     QEMU_IFLA_BRPORT_ROOT_ID,
482     QEMU_IFLA_BRPORT_BRIDGE_ID,
483     QEMU_IFLA_BRPORT_DESIGNATED_PORT,
484     QEMU_IFLA_BRPORT_DESIGNATED_COST,
485     QEMU_IFLA_BRPORT_ID,
486     QEMU_IFLA_BRPORT_NO,
487     QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
488     QEMU_IFLA_BRPORT_CONFIG_PENDING,
489     QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
490     QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
491     QEMU_IFLA_BRPORT_HOLD_TIMER,
492     QEMU_IFLA_BRPORT_FLUSH,
493     QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
494     QEMU_IFLA_BRPORT_PAD,
495     QEMU_IFLA_BRPORT_MCAST_FLOOD,
496     QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
497     QEMU_IFLA_BRPORT_VLAN_TUNNEL,
498     QEMU_IFLA_BRPORT_BCAST_FLOOD,
499     QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
500     QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
501     QEMU___IFLA_BRPORT_MAX
502 };
503 
504 enum {
505     QEMU_IFLA_INFO_UNSPEC,
506     QEMU_IFLA_INFO_KIND,
507     QEMU_IFLA_INFO_DATA,
508     QEMU_IFLA_INFO_XSTATS,
509     QEMU_IFLA_INFO_SLAVE_KIND,
510     QEMU_IFLA_INFO_SLAVE_DATA,
511     QEMU___IFLA_INFO_MAX,
512 };
513 
514 enum {
515     QEMU_IFLA_INET_UNSPEC,
516     QEMU_IFLA_INET_CONF,
517     QEMU___IFLA_INET_MAX,
518 };
519 
520 enum {
521     QEMU_IFLA_INET6_UNSPEC,
522     QEMU_IFLA_INET6_FLAGS,
523     QEMU_IFLA_INET6_CONF,
524     QEMU_IFLA_INET6_STATS,
525     QEMU_IFLA_INET6_MCAST,
526     QEMU_IFLA_INET6_CACHEINFO,
527     QEMU_IFLA_INET6_ICMP6STATS,
528     QEMU_IFLA_INET6_TOKEN,
529     QEMU_IFLA_INET6_ADDR_GEN_MODE,
530     QEMU___IFLA_INET6_MAX
531 };
532 
533 enum {
534     QEMU_IFLA_XDP_UNSPEC,
535     QEMU_IFLA_XDP_FD,
536     QEMU_IFLA_XDP_ATTACHED,
537     QEMU_IFLA_XDP_FLAGS,
538     QEMU_IFLA_XDP_PROG_ID,
539     QEMU___IFLA_XDP_MAX,
540 };
541 
542 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
543 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
544 typedef struct TargetFdTrans {
545     TargetFdDataFunc host_to_target_data;
546     TargetFdDataFunc target_to_host_data;
547     TargetFdAddrFunc target_to_host_addr;
548 } TargetFdTrans;
549 
550 static TargetFdTrans **target_fd_trans;
551 
552 static unsigned int target_fd_max;
553 
554 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
555 {
556     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
557         return target_fd_trans[fd]->target_to_host_data;
558     }
559     return NULL;
560 }
561 
562 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
563 {
564     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
565         return target_fd_trans[fd]->host_to_target_data;
566     }
567     return NULL;
568 }
569 
570 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
571 {
572     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
573         return target_fd_trans[fd]->target_to_host_addr;
574     }
575     return NULL;
576 }
577 
578 static void fd_trans_register(int fd, TargetFdTrans *trans)
579 {
580     unsigned int oldmax;
581 
582     if (fd >= target_fd_max) {
583         oldmax = target_fd_max;
584         target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
585         target_fd_trans = g_renew(TargetFdTrans *,
586                                   target_fd_trans, target_fd_max);
587         memset((void *)(target_fd_trans + oldmax), 0,
588                (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
589     }
590     target_fd_trans[fd] = trans;
591 }
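
/* Worked example of the slicing above (numbers are illustrative):
 * registering a translator for fd 70 when target_fd_max is 64 grows the
 * array to ((70 >> 6) + 1) << 6 == 128 entries and zeroes the new slots
 * [64, 128), so lookups on fds that never had a translator registered
 * still return NULL.
 */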
592 
593 static void fd_trans_unregister(int fd)
594 {
595     if (fd >= 0 && fd < target_fd_max) {
596         target_fd_trans[fd] = NULL;
597     }
598 }
599 
600 static void fd_trans_dup(int oldfd, int newfd)
601 {
602     fd_trans_unregister(newfd);
603     if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
604         fd_trans_register(newfd, target_fd_trans[oldfd]);
605     }
606 }
607 
608 static int sys_getcwd1(char *buf, size_t size)
609 {
610   if (getcwd(buf, size) == NULL) {
611       /* getcwd() sets errno */
612       return (-1);
613   }
614   return strlen(buf)+1;
615 }
616 
617 #ifdef TARGET_NR_utimensat
618 #if defined(__NR_utimensat)
619 #define __NR_sys_utimensat __NR_utimensat
620 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
621           const struct timespec *,tsp,int,flags)
622 #else
623 static int sys_utimensat(int dirfd, const char *pathname,
624                          const struct timespec times[2], int flags)
625 {
626     errno = ENOSYS;
627     return -1;
628 }
629 #endif
630 #endif /* TARGET_NR_utimensat */
631 
632 #ifdef TARGET_NR_renameat2
633 #if defined(__NR_renameat2)
634 #define __NR_sys_renameat2 __NR_renameat2
635 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
636           const char *, new, unsigned int, flags)
637 #else
638 static int sys_renameat2(int oldfd, const char *old,
639                          int newfd, const char *new, int flags)
640 {
641     if (flags == 0) {
642         return renameat(oldfd, old, newfd, new);
643     }
644     errno = ENOSYS;
645     return -1;
646 }
647 #endif
648 #endif /* TARGET_NR_renameat2 */
649 
650 #ifdef CONFIG_INOTIFY
651 #include <sys/inotify.h>
652 
653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
654 static int sys_inotify_init(void)
655 {
656   return (inotify_init());
657 }
658 #endif
659 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
660 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
661 {
662   return (inotify_add_watch(fd, pathname, mask));
663 }
664 #endif
665 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
666 static int sys_inotify_rm_watch(int fd, int32_t wd)
667 {
668   return (inotify_rm_watch(fd, wd));
669 }
670 #endif
671 #ifdef CONFIG_INOTIFY1
672 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
673 static int sys_inotify_init1(int flags)
674 {
675   return (inotify_init1(flags));
676 }
677 #endif
678 #endif
679 #else
680 /* Userspace can usually survive runtime without inotify */
681 #undef TARGET_NR_inotify_init
682 #undef TARGET_NR_inotify_init1
683 #undef TARGET_NR_inotify_add_watch
684 #undef TARGET_NR_inotify_rm_watch
685 #endif /* CONFIG_INOTIFY  */
686 
687 #if defined(TARGET_NR_prlimit64)
688 #ifndef __NR_prlimit64
689 # define __NR_prlimit64 -1
690 #endif
691 #define __NR_sys_prlimit64 __NR_prlimit64
692 /* The glibc rlimit structure may not be the one used by the underlying syscall */
693 struct host_rlimit64 {
694     uint64_t rlim_cur;
695     uint64_t rlim_max;
696 };
697 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
698           const struct host_rlimit64 *, new_limit,
699           struct host_rlimit64 *, old_limit)
700 #endif
701 
702 
703 #if defined(TARGET_NR_timer_create)
704 /* Maximum of 32 active POSIX timers allowed at any one time. */
705 static timer_t g_posix_timers[32] = { 0, } ;
706 
707 static inline int next_free_host_timer(void)
708 {
709     int k ;
710     /* FIXME: Does finding the next free slot require a lock? */
711     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
712         if (g_posix_timers[k] == 0) {
713             g_posix_timers[k] = (timer_t) 1;
714             return k;
715         }
716     }
717     return -1;
718 }
719 #endif
720 
721 /* ARM EABI and MIPS expect 64-bit types to be aligned even on pairs of registers */
722 #ifdef TARGET_ARM
723 static inline int regpairs_aligned(void *cpu_env, int num)
724 {
725     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
726 }
727 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
728 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
729 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
730 /* SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
731  * of registers which translates to the same as ARM/MIPS, because we start with
732  * r3 as arg1 */
733 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
734 #elif defined(TARGET_SH4)
735 /* SH4 doesn't align register pairs, except for p{read,write}64 */
736 static inline int regpairs_aligned(void *cpu_env, int num)
737 {
738     switch (num) {
739     case TARGET_NR_pread64:
740     case TARGET_NR_pwrite64:
741         return 1;
742 
743     default:
744         return 0;
745     }
746 }
747 #elif defined(TARGET_XTENSA)
748 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
749 #else
750 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
751 #endif
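
/* For example, on ARM EABI the 64-bit offset argument of the guest's
 * pread64(fd, buf, count, offset) is passed in an even/odd register pair,
 * leaving one unused padding slot before it in the argument list;
 * regpairs_aligned() tells the 64-bit argument handling elsewhere in this
 * file to skip that slot.
 */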
752 
753 #define ERRNO_TABLE_SIZE 1200
754 
755 /* target_to_host_errno_table[] is initialized from
756  * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
758 };
759 
760 /*
761  * This list is the union of errno values overridden in asm-<arch>/errno.h
762  * minus the errnos that are not actually generic to all archs.
763  */
764 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
765     [EAGAIN]		= TARGET_EAGAIN,
766     [EIDRM]		= TARGET_EIDRM,
767     [ECHRNG]		= TARGET_ECHRNG,
768     [EL2NSYNC]		= TARGET_EL2NSYNC,
769     [EL3HLT]		= TARGET_EL3HLT,
770     [EL3RST]		= TARGET_EL3RST,
771     [ELNRNG]		= TARGET_ELNRNG,
772     [EUNATCH]		= TARGET_EUNATCH,
773     [ENOCSI]		= TARGET_ENOCSI,
774     [EL2HLT]		= TARGET_EL2HLT,
775     [EDEADLK]		= TARGET_EDEADLK,
776     [ENOLCK]		= TARGET_ENOLCK,
777     [EBADE]		= TARGET_EBADE,
778     [EBADR]		= TARGET_EBADR,
779     [EXFULL]		= TARGET_EXFULL,
780     [ENOANO]		= TARGET_ENOANO,
781     [EBADRQC]		= TARGET_EBADRQC,
782     [EBADSLT]		= TARGET_EBADSLT,
783     [EBFONT]		= TARGET_EBFONT,
784     [ENOSTR]		= TARGET_ENOSTR,
785     [ENODATA]		= TARGET_ENODATA,
786     [ETIME]		= TARGET_ETIME,
787     [ENOSR]		= TARGET_ENOSR,
788     [ENONET]		= TARGET_ENONET,
789     [ENOPKG]		= TARGET_ENOPKG,
790     [EREMOTE]		= TARGET_EREMOTE,
791     [ENOLINK]		= TARGET_ENOLINK,
792     [EADV]		= TARGET_EADV,
793     [ESRMNT]		= TARGET_ESRMNT,
794     [ECOMM]		= TARGET_ECOMM,
795     [EPROTO]		= TARGET_EPROTO,
796     [EDOTDOT]		= TARGET_EDOTDOT,
797     [EMULTIHOP]		= TARGET_EMULTIHOP,
798     [EBADMSG]		= TARGET_EBADMSG,
799     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
800     [EOVERFLOW]		= TARGET_EOVERFLOW,
801     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
802     [EBADFD]		= TARGET_EBADFD,
803     [EREMCHG]		= TARGET_EREMCHG,
804     [ELIBACC]		= TARGET_ELIBACC,
805     [ELIBBAD]		= TARGET_ELIBBAD,
806     [ELIBSCN]		= TARGET_ELIBSCN,
807     [ELIBMAX]		= TARGET_ELIBMAX,
808     [ELIBEXEC]		= TARGET_ELIBEXEC,
809     [EILSEQ]		= TARGET_EILSEQ,
810     [ENOSYS]		= TARGET_ENOSYS,
811     [ELOOP]		= TARGET_ELOOP,
812     [ERESTART]		= TARGET_ERESTART,
813     [ESTRPIPE]		= TARGET_ESTRPIPE,
814     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
815     [EUSERS]		= TARGET_EUSERS,
816     [ENOTSOCK]		= TARGET_ENOTSOCK,
817     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
818     [EMSGSIZE]		= TARGET_EMSGSIZE,
819     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
820     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
821     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
822     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
823     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
824     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
825     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
826     [EADDRINUSE]	= TARGET_EADDRINUSE,
827     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
828     [ENETDOWN]		= TARGET_ENETDOWN,
829     [ENETUNREACH]	= TARGET_ENETUNREACH,
830     [ENETRESET]		= TARGET_ENETRESET,
831     [ECONNABORTED]	= TARGET_ECONNABORTED,
832     [ECONNRESET]	= TARGET_ECONNRESET,
833     [ENOBUFS]		= TARGET_ENOBUFS,
834     [EISCONN]		= TARGET_EISCONN,
835     [ENOTCONN]		= TARGET_ENOTCONN,
836     [EUCLEAN]		= TARGET_EUCLEAN,
837     [ENOTNAM]		= TARGET_ENOTNAM,
838     [ENAVAIL]		= TARGET_ENAVAIL,
839     [EISNAM]		= TARGET_EISNAM,
840     [EREMOTEIO]		= TARGET_EREMOTEIO,
841     [EDQUOT]            = TARGET_EDQUOT,
842     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
843     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
844     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
845     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
846     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
847     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
848     [EALREADY]		= TARGET_EALREADY,
849     [EINPROGRESS]	= TARGET_EINPROGRESS,
850     [ESTALE]		= TARGET_ESTALE,
851     [ECANCELED]		= TARGET_ECANCELED,
852     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
853     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
854 #ifdef ENOKEY
855     [ENOKEY]		= TARGET_ENOKEY,
856 #endif
857 #ifdef EKEYEXPIRED
858     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
859 #endif
860 #ifdef EKEYREVOKED
861     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
862 #endif
863 #ifdef EKEYREJECTED
864     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
865 #endif
866 #ifdef EOWNERDEAD
867     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
868 #endif
869 #ifdef ENOTRECOVERABLE
870     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
871 #endif
872 #ifdef ENOMSG
873     [ENOMSG]            = TARGET_ENOMSG,
874 #endif
875 #ifdef ERFKILL
876     [ERFKILL]           = TARGET_ERFKILL,
877 #endif
878 #ifdef EHWPOISON
879     [EHWPOISON]         = TARGET_EHWPOISON,
880 #endif
881 };
882 
883 static inline int host_to_target_errno(int err)
884 {
885     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
886         host_to_target_errno_table[err]) {
887         return host_to_target_errno_table[err];
888     }
889     return err;
890 }
891 
892 static inline int target_to_host_errno(int err)
893 {
894     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
895         target_to_host_errno_table[err]) {
896         return target_to_host_errno_table[err];
897     }
898     return err;
899 }
900 
901 static inline abi_long get_errno(abi_long ret)
902 {
903     if (ret == -1)
904         return -host_to_target_errno(errno);
905     else
906         return ret;
907 }
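
/* Typical use (sketch; host_call is a placeholder name): an emulated
 * syscall does
 *   ret = get_errno(host_call(...));
 * so a host failure with errno == EAGAIN becomes -TARGET_EAGAIN, the
 * negated errno value the guest ABI expects, while successful return
 * values pass through unchanged.
 */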
908 
909 const char *target_strerror(int err)
910 {
911     if (err == TARGET_ERESTARTSYS) {
912         return "To be restarted";
913     }
914     if (err == TARGET_QEMU_ESIGRETURN) {
915         return "Successful exit from sigreturn";
916     }
917 
918     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
919         return NULL;
920     }
921     return strerror(target_to_host_errno(err));
922 }
923 
924 #define safe_syscall0(type, name) \
925 static type safe_##name(void) \
926 { \
927     return safe_syscall(__NR_##name); \
928 }
929 
930 #define safe_syscall1(type, name, type1, arg1) \
931 static type safe_##name(type1 arg1) \
932 { \
933     return safe_syscall(__NR_##name, arg1); \
934 }
935 
936 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
937 static type safe_##name(type1 arg1, type2 arg2) \
938 { \
939     return safe_syscall(__NR_##name, arg1, arg2); \
940 }
941 
942 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
943 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
944 { \
945     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
946 }
947 
948 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
949     type4, arg4) \
950 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
951 { \
952     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
953 }
954 
955 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
956     type4, arg4, type5, arg5) \
957 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
958     type5 arg5) \
959 { \
960     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
961 }
962 
963 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
964     type4, arg4, type5, arg5, type6, arg6) \
965 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
966     type5 arg5, type6 arg6) \
967 { \
968     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
969 }
970 
971 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
972 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
973 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
974               int, flags, mode_t, mode)
975 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
976               struct rusage *, rusage)
977 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
978               int, options, struct rusage *, rusage)
979 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
980 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
981               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
982 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
983               struct timespec *, tsp, const sigset_t *, sigmask,
984               size_t, sigsetsize)
985 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
986               int, maxevents, int, timeout, const sigset_t *, sigmask,
987               size_t, sigsetsize)
988 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
989               const struct timespec *,timeout,int *,uaddr2,int,val3)
990 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
991 safe_syscall2(int, kill, pid_t, pid, int, sig)
992 safe_syscall2(int, tkill, int, tid, int, sig)
993 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
994 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
995 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
996 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
997               unsigned long, pos_l, unsigned long, pos_h)
998 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
999               unsigned long, pos_l, unsigned long, pos_h)
1000 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
1001               socklen_t, addrlen)
1002 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
1003               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
1004 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
1005               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
1006 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
1007 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
1008 safe_syscall2(int, flock, int, fd, int, operation)
1009 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
1010               const struct timespec *, uts, size_t, sigsetsize)
1011 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
1012               int, flags)
1013 safe_syscall2(int, nanosleep, const struct timespec *, req,
1014               struct timespec *, rem)
1015 #ifdef TARGET_NR_clock_nanosleep
1016 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
1017               const struct timespec *, req, struct timespec *, rem)
1018 #endif
1019 #ifdef __NR_msgsnd
1020 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
1021               int, flags)
1022 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
1023               long, msgtype, int, flags)
1024 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
1025               unsigned, nsops, const struct timespec *, timeout)
1026 #else
1027 /* This host kernel architecture uses a single ipc syscall; fake up
1028  * wrappers for the sub-operations to hide this implementation detail.
1029  * Annoyingly we can't include linux/ipc.h to get the constant definitions
1030  * for the call parameter because some structs in there conflict with the
1031  * sys/ipc.h ones. So we just define them here, and rely on them being
1032  * the same for all host architectures.
1033  */
1034 #define Q_SEMTIMEDOP 4
1035 #define Q_MSGSND 11
1036 #define Q_MSGRCV 12
1037 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1038 
1039 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1040               void *, ptr, long, fifth)
1041 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1042 {
1043     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1044 }
1045 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1046 {
1047     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1048 }
1049 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1050                            const struct timespec *timeout)
1051 {
1052     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1053                     (long)timeout);
1054 }
1055 #endif
1056 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1057 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1058               size_t, len, unsigned, prio, const struct timespec *, timeout)
1059 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1060               size_t, len, unsigned *, prio, const struct timespec *, timeout)
1061 #endif
1062 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1063  * "third argument might be integer or pointer or not present" behaviour of
1064  * the libc function.
1065  */
1066 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1067 /* Similarly for fcntl. Note that callers must always:
1068  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1069  *  use the flock64 struct rather than unsuffixed flock
1070  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1071  */
1072 #ifdef __NR_fcntl64
1073 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1074 #else
1075 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1076 #endif
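
/* Usage sketch for the rule above (names are illustrative, not the exact
 * code used later in this file):
 *   struct flock64 fl64;
 *   ...
 *   ret = get_errno(safe_fcntl(fd, F_SETLKW64, &fl64));
 * i.e. always the 64-bit command constants and struct flock64, regardless
 * of whether the host itself is 32-bit or 64-bit.
 */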
1077 
1078 static inline int host_to_target_sock_type(int host_type)
1079 {
1080     int target_type;
1081 
1082     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1083     case SOCK_DGRAM:
1084         target_type = TARGET_SOCK_DGRAM;
1085         break;
1086     case SOCK_STREAM:
1087         target_type = TARGET_SOCK_STREAM;
1088         break;
1089     default:
1090         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1091         break;
1092     }
1093 
1094 #if defined(SOCK_CLOEXEC)
1095     if (host_type & SOCK_CLOEXEC) {
1096         target_type |= TARGET_SOCK_CLOEXEC;
1097     }
1098 #endif
1099 
1100 #if defined(SOCK_NONBLOCK)
1101     if (host_type & SOCK_NONBLOCK) {
1102         target_type |= TARGET_SOCK_NONBLOCK;
1103     }
1104 #endif
1105 
1106     return target_type;
1107 }
1108 
1109 static abi_ulong target_brk;
1110 static abi_ulong target_original_brk;
1111 static abi_ulong brk_page;
1112 
1113 void target_set_brk(abi_ulong new_brk)
1114 {
1115     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1116     brk_page = HOST_PAGE_ALIGN(target_brk);
1117 }
1118 
1119 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1120 #define DEBUGF_BRK(message, args...)
1121 
1122 /* do_brk() must return target values and target errnos. */
1123 abi_long do_brk(abi_ulong new_brk)
1124 {
1125     abi_long mapped_addr;
1126     abi_ulong new_alloc_size;
1127 
1128     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1129 
1130     if (!new_brk) {
1131         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1132         return target_brk;
1133     }
1134     if (new_brk < target_original_brk) {
1135         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1136                    target_brk);
1137         return target_brk;
1138     }
1139 
1140     /* If the new brk is less than the highest page reserved to the
1141      * target heap allocation, set it and we're almost done...  */
1142     if (new_brk <= brk_page) {
1143         /* Heap contents are initialized to zero, as for anonymous
1144          * mapped pages.  */
1145         if (new_brk > target_brk) {
1146             memset(g2h(target_brk), 0, new_brk - target_brk);
1147         }
1148 	target_brk = new_brk;
1149         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1150     	return target_brk;
1151     }
1152 
1153     /* We need to allocate more memory after the brk... Note that
1154      * we don't use MAP_FIXED because that will map over the top of
1155      * any existing mapping (like the one with the host libc or qemu
1156      * itself); instead we treat "mapped but at wrong address" as
1157      * a failure and unmap again.
1158      */
1159     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1160     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1161                                         PROT_READ|PROT_WRITE,
1162                                         MAP_ANON|MAP_PRIVATE, 0, 0));
1163 
1164     if (mapped_addr == brk_page) {
1165         /* Heap contents are initialized to zero, as for anonymous
1166          * mapped pages.  Technically the new pages are already
1167          * initialized to zero since they *are* anonymous mapped
1168          * pages, however we have to take care with the contents that
1169          * come from the remaining part of the previous page: it may
1170          * contain garbage data due to previous heap usage (grown
1171          * then shrunk).  */
1172         memset(g2h(target_brk), 0, brk_page - target_brk);
1173 
1174         target_brk = new_brk;
1175         brk_page = HOST_PAGE_ALIGN(target_brk);
1176         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1177             target_brk);
1178         return target_brk;
1179     } else if (mapped_addr != -1) {
1180         /* Mapped but at wrong address, meaning there wasn't actually
1181          * enough space for this brk.
1182          */
1183         target_munmap(mapped_addr, new_alloc_size);
1184         mapped_addr = -1;
1185         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1186     }
1187     else {
1188         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1189     }
1190 
1191 #if defined(TARGET_ALPHA)
1192     /* We (partially) emulate OSF/1 on Alpha, which requires that we
1193        return a proper errno, not an unchanged brk value.  */
1194     return -TARGET_ENOMEM;
1195 #endif
1196     /* For everything else, return the previous break. */
1197     return target_brk;
1198 }
1199 
1200 static inline abi_long copy_from_user_fdset(fd_set *fds,
1201                                             abi_ulong target_fds_addr,
1202                                             int n)
1203 {
1204     int i, nw, j, k;
1205     abi_ulong b, *target_fds;
1206 
1207     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1208     if (!(target_fds = lock_user(VERIFY_READ,
1209                                  target_fds_addr,
1210                                  sizeof(abi_ulong) * nw,
1211                                  1)))
1212         return -TARGET_EFAULT;
1213 
1214     FD_ZERO(fds);
1215     k = 0;
1216     for (i = 0; i < nw; i++) {
1217         /* grab the abi_ulong */
1218         __get_user(b, &target_fds[i]);
1219         for (j = 0; j < TARGET_ABI_BITS; j++) {
1220             /* check the bit inside the abi_ulong */
1221             if ((b >> j) & 1)
1222                 FD_SET(k, fds);
1223             k++;
1224         }
1225     }
1226 
1227     unlock_user(target_fds, target_fds_addr, 0);
1228 
1229     return 0;
1230 }
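
/* Sizing example for the copy above: with n == 100 descriptors and
 * TARGET_ABI_BITS == 32, nw == DIV_ROUND_UP(100, 32) == 4, so four guest
 * abi_ulong words are locked, and guest bit j of word i maps to host
 * descriptor k == i * 32 + j in the fd_set.
 */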
1231 
1232 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1233                                                  abi_ulong target_fds_addr,
1234                                                  int n)
1235 {
1236     if (target_fds_addr) {
1237         if (copy_from_user_fdset(fds, target_fds_addr, n))
1238             return -TARGET_EFAULT;
1239         *fds_ptr = fds;
1240     } else {
1241         *fds_ptr = NULL;
1242     }
1243     return 0;
1244 }
1245 
1246 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1247                                           const fd_set *fds,
1248                                           int n)
1249 {
1250     int i, nw, j, k;
1251     abi_long v;
1252     abi_ulong *target_fds;
1253 
1254     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1255     if (!(target_fds = lock_user(VERIFY_WRITE,
1256                                  target_fds_addr,
1257                                  sizeof(abi_ulong) * nw,
1258                                  0)))
1259         return -TARGET_EFAULT;
1260 
1261     k = 0;
1262     for (i = 0; i < nw; i++) {
1263         v = 0;
1264         for (j = 0; j < TARGET_ABI_BITS; j++) {
1265             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1266             k++;
1267         }
1268         __put_user(v, &target_fds[i]);
1269     }
1270 
1271     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1272 
1273     return 0;
1274 }
1275 
1276 #if defined(__alpha__)
1277 #define HOST_HZ 1024
1278 #else
1279 #define HOST_HZ 100
1280 #endif
1281 
1282 static inline abi_long host_to_target_clock_t(long ticks)
1283 {
1284 #if HOST_HZ == TARGET_HZ
1285     return ticks;
1286 #else
1287     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1288 #endif
1289 }
1290 
1291 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1292                                              const struct rusage *rusage)
1293 {
1294     struct target_rusage *target_rusage;
1295 
1296     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1297         return -TARGET_EFAULT;
1298     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1299     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1300     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1301     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1302     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1303     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1304     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1305     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1306     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1307     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1308     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1309     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1310     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1311     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1312     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1313     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1314     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1315     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1316     unlock_user_struct(target_rusage, target_addr, 1);
1317 
1318     return 0;
1319 }
1320 
1321 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1322 {
1323     abi_ulong target_rlim_swap;
1324     rlim_t result;
1325 
1326     target_rlim_swap = tswapal(target_rlim);
1327     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1328         return RLIM_INFINITY;
1329 
1330     result = target_rlim_swap;
1331     if (target_rlim_swap != (rlim_t)result)
1332         return RLIM_INFINITY;
1333 
1334     return result;
1335 }
1336 
1337 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1338 {
1339     abi_ulong target_rlim_swap;
1340     abi_ulong result;
1341 
1342     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1343         target_rlim_swap = TARGET_RLIM_INFINITY;
1344     else
1345         target_rlim_swap = rlim;
1346     result = tswapal(target_rlim_swap);
1347 
1348     return result;
1349 }
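
/* For instance, on a 32-bit target a host limit of 8 GiB does not fit in
 * an abi_long, so the check above reports TARGET_RLIM_INFINITY to the
 * guest rather than a silently truncated value.
 */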
1350 
1351 static inline int target_to_host_resource(int code)
1352 {
1353     switch (code) {
1354     case TARGET_RLIMIT_AS:
1355         return RLIMIT_AS;
1356     case TARGET_RLIMIT_CORE:
1357         return RLIMIT_CORE;
1358     case TARGET_RLIMIT_CPU:
1359         return RLIMIT_CPU;
1360     case TARGET_RLIMIT_DATA:
1361         return RLIMIT_DATA;
1362     case TARGET_RLIMIT_FSIZE:
1363         return RLIMIT_FSIZE;
1364     case TARGET_RLIMIT_LOCKS:
1365         return RLIMIT_LOCKS;
1366     case TARGET_RLIMIT_MEMLOCK:
1367         return RLIMIT_MEMLOCK;
1368     case TARGET_RLIMIT_MSGQUEUE:
1369         return RLIMIT_MSGQUEUE;
1370     case TARGET_RLIMIT_NICE:
1371         return RLIMIT_NICE;
1372     case TARGET_RLIMIT_NOFILE:
1373         return RLIMIT_NOFILE;
1374     case TARGET_RLIMIT_NPROC:
1375         return RLIMIT_NPROC;
1376     case TARGET_RLIMIT_RSS:
1377         return RLIMIT_RSS;
1378     case TARGET_RLIMIT_RTPRIO:
1379         return RLIMIT_RTPRIO;
1380     case TARGET_RLIMIT_SIGPENDING:
1381         return RLIMIT_SIGPENDING;
1382     case TARGET_RLIMIT_STACK:
1383         return RLIMIT_STACK;
1384     default:
1385         return code;
1386     }
1387 }
1388 
1389 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1390                                               abi_ulong target_tv_addr)
1391 {
1392     struct target_timeval *target_tv;
1393 
1394     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1395         return -TARGET_EFAULT;
1396 
1397     __get_user(tv->tv_sec, &target_tv->tv_sec);
1398     __get_user(tv->tv_usec, &target_tv->tv_usec);
1399 
1400     unlock_user_struct(target_tv, target_tv_addr, 0);
1401 
1402     return 0;
1403 }
1404 
1405 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1406                                             const struct timeval *tv)
1407 {
1408     struct target_timeval *target_tv;
1409 
1410     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1411         return -TARGET_EFAULT;
1412 
1413     __put_user(tv->tv_sec, &target_tv->tv_sec);
1414     __put_user(tv->tv_usec, &target_tv->tv_usec);
1415 
1416     unlock_user_struct(target_tv, target_tv_addr, 1);
1417 
1418     return 0;
1419 }
1420 
1421 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1422                                                abi_ulong target_tz_addr)
1423 {
1424     struct target_timezone *target_tz;
1425 
1426     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1427         return -TARGET_EFAULT;
1428     }
1429 
1430     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1431     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1432 
1433     unlock_user_struct(target_tz, target_tz_addr, 0);
1434 
1435     return 0;
1436 }
1437 
1438 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1439 #include <mqueue.h>
1440 
1441 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1442                                               abi_ulong target_mq_attr_addr)
1443 {
1444     struct target_mq_attr *target_mq_attr;
1445 
1446     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1447                           target_mq_attr_addr, 1))
1448         return -TARGET_EFAULT;
1449 
1450     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1451     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1452     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1453     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1454 
1455     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1456 
1457     return 0;
1458 }
1459 
1460 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1461                                             const struct mq_attr *attr)
1462 {
1463     struct target_mq_attr *target_mq_attr;
1464 
1465     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1466                           target_mq_attr_addr, 0))
1467         return -TARGET_EFAULT;
1468 
1469     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1470     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1471     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1472     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1473 
1474     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1475 
1476     return 0;
1477 }
1478 #endif
1479 
1480 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1481 /* do_select() must return target values and target errnos. */
1482 static abi_long do_select(int n,
1483                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1484                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1485 {
1486     fd_set rfds, wfds, efds;
1487     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1488     struct timeval tv;
1489     struct timespec ts, *ts_ptr;
1490     abi_long ret;
1491 
1492     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1493     if (ret) {
1494         return ret;
1495     }
1496     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1497     if (ret) {
1498         return ret;
1499     }
1500     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1501     if (ret) {
1502         return ret;
1503     }
1504 
1505     if (target_tv_addr) {
1506         if (copy_from_user_timeval(&tv, target_tv_addr))
1507             return -TARGET_EFAULT;
1508         ts.tv_sec = tv.tv_sec;
1509         ts.tv_nsec = tv.tv_usec * 1000;
1510         ts_ptr = &ts;
1511     } else {
1512         ts_ptr = NULL;
1513     }
1514 
1515     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1516                                   ts_ptr, NULL));
1517 
1518     if (!is_error(ret)) {
1519         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1520             return -TARGET_EFAULT;
1521         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1522             return -TARGET_EFAULT;
1523         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1524             return -TARGET_EFAULT;
1525 
1526         if (target_tv_addr) {
1527             tv.tv_sec = ts.tv_sec;
1528             tv.tv_usec = ts.tv_nsec / 1000;
1529             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1530                 return -TARGET_EFAULT;
1531             }
1532         }
1533     }
1534 
1535     return ret;
1536 }
1537 
1538 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1539 static abi_long do_old_select(abi_ulong arg1)
1540 {
1541     struct target_sel_arg_struct *sel;
1542     abi_ulong inp, outp, exp, tvp;
1543     long nsel;
1544 
1545     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1546         return -TARGET_EFAULT;
1547     }
1548 
1549     nsel = tswapal(sel->n);
1550     inp = tswapal(sel->inp);
1551     outp = tswapal(sel->outp);
1552     exp = tswapal(sel->exp);
1553     tvp = tswapal(sel->tvp);
1554 
1555     unlock_user_struct(sel, arg1, 0);
1556 
1557     return do_select(nsel, inp, outp, exp, tvp);
1558 }
1559 #endif
1560 #endif
1561 
1562 static abi_long do_pipe2(int host_pipe[], int flags)
1563 {
1564 #ifdef CONFIG_PIPE2
1565     return pipe2(host_pipe, flags);
1566 #else
1567     return -ENOSYS;
1568 #endif
1569 }
1570 
1571 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1572                         int flags, int is_pipe2)
1573 {
1574     int host_pipe[2];
1575     abi_long ret;
1576     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1577 
1578     if (is_error(ret))
1579         return get_errno(ret);
1580 
1581     /* Several targets have special calling conventions for the original
1582        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1583     if (!is_pipe2) {
1584 #if defined(TARGET_ALPHA)
1585         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1586         return host_pipe[0];
1587 #elif defined(TARGET_MIPS)
1588         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1589         return host_pipe[0];
1590 #elif defined(TARGET_SH4)
1591         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1592         return host_pipe[0];
1593 #elif defined(TARGET_SPARC)
1594         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1595         return host_pipe[0];
1596 #endif
1597     }
1598 
1599     if (put_user_s32(host_pipe[0], pipedes)
1600         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1601         return -TARGET_EFAULT;
1602     return get_errno(ret);
1603 }
1604 
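/* Convert a guest IP multicast membership request into a host ip_mreqn.
 * The guest may pass either the short ip_mreq layout or the full ip_mreqn
 * layout; imr_ifindex is only converted when the full structure was given.
 */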
1605 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1606                                               abi_ulong target_addr,
1607                                               socklen_t len)
1608 {
1609     struct target_ip_mreqn *target_smreqn;
1610 
1611     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1612     if (!target_smreqn)
1613         return -TARGET_EFAULT;
1614     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1615     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1616     if (len == sizeof(struct target_ip_mreqn))
1617         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1618     unlock_user(target_smreqn, target_addr, 0);
1619 
1620     return 0;
1621 }
1622 
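/* Copy a guest sockaddr into host form.  A per-fd translation hook takes
 * precedence if one is registered; otherwise the buffer is copied with the
 * family byte-swapped, netlink and packet fields converted, and the
 * AF_UNIX sun_path length quirk described below applied.
 */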
1623 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1624                                                abi_ulong target_addr,
1625                                                socklen_t len)
1626 {
1627     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1628     sa_family_t sa_family;
1629     struct target_sockaddr *target_saddr;
1630 
1631     if (fd_trans_target_to_host_addr(fd)) {
1632         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1633     }
1634 
1635     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1636     if (!target_saddr)
1637         return -TARGET_EFAULT;
1638 
1639     sa_family = tswap16(target_saddr->sa_family);
1640 
1641     /* Oops. The caller might send an incomplete sun_path; sun_path
1642      * must be terminated by \0 (see the manual page), but
1643      * unfortunately it is quite common to specify sockaddr_un
1644      * length as "strlen(x->sun_path)" while it should be
1645      * "strlen(...) + 1". We'll fix that here if needed.
1646      * The Linux kernel applies the same fix-up.
1647      */
1648 
1649     if (sa_family == AF_UNIX) {
1650         if (len < unix_maxlen && len > 0) {
1651             char *cp = (char*)target_saddr;
1652 
1653             if (cp[len - 1] && !cp[len])
1654                 len++;
1655         }
1656         if (len > unix_maxlen)
1657             len = unix_maxlen;
1658     }
1659 
1660     memcpy(addr, target_saddr, len);
1661     addr->sa_family = sa_family;
1662     if (sa_family == AF_NETLINK) {
1663         struct sockaddr_nl *nladdr;
1664 
1665         nladdr = (struct sockaddr_nl *)addr;
1666         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1667         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1668     } else if (sa_family == AF_PACKET) {
1669         struct target_sockaddr_ll *lladdr;
1670 
1671         lladdr = (struct target_sockaddr_ll *)addr;
1672         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1673         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1674     }
1675     unlock_user(target_saddr, target_addr, 0);
1676 
1677     return 0;
1678 }
1679 
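/* The reverse direction: copy a host sockaddr back to guest memory,
 * byte-swapping the family plus the netlink, packet and IPv6 scope-id
 * fields when the buffer is large enough to contain them.
 */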
1680 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1681                                                struct sockaddr *addr,
1682                                                socklen_t len)
1683 {
1684     struct target_sockaddr *target_saddr;
1685 
1686     if (len == 0) {
1687         return 0;
1688     }
1689     assert(addr);
1690 
1691     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1692     if (!target_saddr)
1693         return -TARGET_EFAULT;
1694     memcpy(target_saddr, addr, len);
1695     if (len >= offsetof(struct target_sockaddr, sa_family) +
1696         sizeof(target_saddr->sa_family)) {
1697         target_saddr->sa_family = tswap16(addr->sa_family);
1698     }
1699     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1700         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1701         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1702         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1703     } else if (addr->sa_family == AF_PACKET) {
1704         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1705         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1706         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1707     } else if (addr->sa_family == AF_INET6 &&
1708                len >= sizeof(struct target_sockaddr_in6)) {
1709         struct target_sockaddr_in6 *target_in6 =
1710                (struct target_sockaddr_in6 *)target_saddr;
1711         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1712     }
1713     unlock_user(target_saddr, target_addr, len);
1714 
1715     return 0;
1716 }
1717 
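/* Convert ancillary data (SCM_RIGHTS file descriptors, SCM_CREDENTIALS)
 * from the guest msghdr into the host control buffer.  Unknown payload
 * types are copied verbatim with a warning.
 */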
1718 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1719                                            struct target_msghdr *target_msgh)
1720 {
1721     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1722     abi_long msg_controllen;
1723     abi_ulong target_cmsg_addr;
1724     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1725     socklen_t space = 0;
1726 
1727     msg_controllen = tswapal(target_msgh->msg_controllen);
1728     if (msg_controllen < sizeof (struct target_cmsghdr))
1729         goto the_end;
1730     target_cmsg_addr = tswapal(target_msgh->msg_control);
1731     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1732     target_cmsg_start = target_cmsg;
1733     if (!target_cmsg)
1734         return -TARGET_EFAULT;
1735 
1736     while (cmsg && target_cmsg) {
1737         void *data = CMSG_DATA(cmsg);
1738         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1739 
1740         int len = tswapal(target_cmsg->cmsg_len)
1741             - sizeof(struct target_cmsghdr);
1742 
1743         space += CMSG_SPACE(len);
1744         if (space > msgh->msg_controllen) {
1745             space -= CMSG_SPACE(len);
1746             /* This is a QEMU bug, since we allocated the payload
1747              * area ourselves (unlike overflow in host-to-target
1748              * conversion, which is just the guest giving us a buffer
1749              * that's too small). It can't happen for the payload types
1750              * we currently support; if it becomes an issue in future
1751              * we would need to improve our allocation strategy to
1752              * something more intelligent than "twice the size of the
1753              * target buffer we're reading from".
1754              */
1755             gemu_log("Host cmsg overflow\n");
1756             break;
1757         }
1758 
1759         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1760             cmsg->cmsg_level = SOL_SOCKET;
1761         } else {
1762             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1763         }
1764         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1765         cmsg->cmsg_len = CMSG_LEN(len);
1766 
1767         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1768             int *fd = (int *)data;
1769             int *target_fd = (int *)target_data;
1770             int i, numfds = len / sizeof(int);
1771 
1772             for (i = 0; i < numfds; i++) {
1773                 __get_user(fd[i], target_fd + i);
1774             }
1775         } else if (cmsg->cmsg_level == SOL_SOCKET
1776                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1777             struct ucred *cred = (struct ucred *)data;
1778             struct target_ucred *target_cred =
1779                 (struct target_ucred *)target_data;
1780 
1781             __get_user(cred->pid, &target_cred->pid);
1782             __get_user(cred->uid, &target_cred->uid);
1783             __get_user(cred->gid, &target_cred->gid);
1784         } else {
1785             gemu_log("Unsupported ancillary data: %d/%d\n",
1786                                         cmsg->cmsg_level, cmsg->cmsg_type);
1787             memcpy(data, target_data, len);
1788         }
1789 
1790         cmsg = CMSG_NXTHDR(msgh, cmsg);
1791         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1792                                          target_cmsg_start);
1793     }
1794     unlock_user(target_cmsg, target_cmsg_addr, 0);
1795  the_end:
1796     msgh->msg_controllen = space;
1797     return 0;
1798 }
1799 
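/* Convert ancillary data received from the host back into the guest's
 * control buffer.  Payloads whose guest size differs from the host size
 * (for example SO_TIMESTAMP's struct timeval) are re-packed, and any
 * truncation is reported to the guest via MSG_CTRUNC.
 */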
1800 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1801                                            struct msghdr *msgh)
1802 {
1803     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1804     abi_long msg_controllen;
1805     abi_ulong target_cmsg_addr;
1806     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1807     socklen_t space = 0;
1808 
1809     msg_controllen = tswapal(target_msgh->msg_controllen);
1810     if (msg_controllen < sizeof (struct target_cmsghdr))
1811         goto the_end;
1812     target_cmsg_addr = tswapal(target_msgh->msg_control);
1813     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1814     target_cmsg_start = target_cmsg;
1815     if (!target_cmsg)
1816         return -TARGET_EFAULT;
1817 
1818     while (cmsg && target_cmsg) {
1819         void *data = CMSG_DATA(cmsg);
1820         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1821 
1822         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1823         int tgt_len, tgt_space;
1824 
1825         /* We never copy a half-header but may copy half-data;
1826          * this is Linux's behaviour in put_cmsg(). Note that
1827          * truncation here is a guest problem (which we report
1828          * to the guest via the CTRUNC bit), unlike truncation
1829          * in target_to_host_cmsg, which is a QEMU bug.
1830          */
1831         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1832             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1833             break;
1834         }
1835 
1836         if (cmsg->cmsg_level == SOL_SOCKET) {
1837             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1838         } else {
1839             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1840         }
1841         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1842 
1843         /* Payload types which need a different size of payload on
1844          * the target must adjust tgt_len here.
1845          */
1846         tgt_len = len;
1847         switch (cmsg->cmsg_level) {
1848         case SOL_SOCKET:
1849             switch (cmsg->cmsg_type) {
1850             case SO_TIMESTAMP:
1851                 tgt_len = sizeof(struct target_timeval);
1852                 break;
1853             default:
1854                 break;
1855             }
1856             break;
1857         default:
1858             break;
1859         }
1860 
1861         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1862             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1863             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1864         }
1865 
1866         /* We must now copy-and-convert len bytes of payload
1867          * into tgt_len bytes of destination space. Bear in mind
1868          * that in both source and destination we may be dealing
1869          * with a truncated value!
1870          */
1871         switch (cmsg->cmsg_level) {
1872         case SOL_SOCKET:
1873             switch (cmsg->cmsg_type) {
1874             case SCM_RIGHTS:
1875             {
1876                 int *fd = (int *)data;
1877                 int *target_fd = (int *)target_data;
1878                 int i, numfds = tgt_len / sizeof(int);
1879 
1880                 for (i = 0; i < numfds; i++) {
1881                     __put_user(fd[i], target_fd + i);
1882                 }
1883                 break;
1884             }
1885             case SO_TIMESTAMP:
1886             {
1887                 struct timeval *tv = (struct timeval *)data;
1888                 struct target_timeval *target_tv =
1889                     (struct target_timeval *)target_data;
1890 
1891                 if (len != sizeof(struct timeval) ||
1892                     tgt_len != sizeof(struct target_timeval)) {
1893                     goto unimplemented;
1894                 }
1895 
1896                 /* copy struct timeval to target */
1897                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1898                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1899                 break;
1900             }
1901             case SCM_CREDENTIALS:
1902             {
1903                 struct ucred *cred = (struct ucred *)data;
1904                 struct target_ucred *target_cred =
1905                     (struct target_ucred *)target_data;
1906 
1907                 __put_user(cred->pid, &target_cred->pid);
1908                 __put_user(cred->uid, &target_cred->uid);
1909                 __put_user(cred->gid, &target_cred->gid);
1910                 break;
1911             }
1912             default:
1913                 goto unimplemented;
1914             }
1915             break;
1916 
1917         case SOL_IP:
1918             switch (cmsg->cmsg_type) {
1919             case IP_TTL:
1920             {
1921                 uint32_t *v = (uint32_t *)data;
1922                 uint32_t *t_int = (uint32_t *)target_data;
1923 
1924                 if (len != sizeof(uint32_t) ||
1925                     tgt_len != sizeof(uint32_t)) {
1926                     goto unimplemented;
1927                 }
1928                 __put_user(*v, t_int);
1929                 break;
1930             }
1931             case IP_RECVERR:
1932             {
1933                 struct errhdr_t {
1934                    struct sock_extended_err ee;
1935                    struct sockaddr_in offender;
1936                 };
1937                 struct errhdr_t *errh = (struct errhdr_t *)data;
1938                 struct errhdr_t *target_errh =
1939                     (struct errhdr_t *)target_data;
1940 
1941                 if (len != sizeof(struct errhdr_t) ||
1942                     tgt_len != sizeof(struct errhdr_t)) {
1943                     goto unimplemented;
1944                 }
1945                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1946                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1947                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1948                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1949                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1950                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1951                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1952                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1953                     (void *) &errh->offender, sizeof(errh->offender));
1954                 break;
1955             }
1956             default:
1957                 goto unimplemented;
1958             }
1959             break;
1960 
1961         case SOL_IPV6:
1962             switch (cmsg->cmsg_type) {
1963             case IPV6_HOPLIMIT:
1964             {
1965                 uint32_t *v = (uint32_t *)data;
1966                 uint32_t *t_int = (uint32_t *)target_data;
1967 
1968                 if (len != sizeof(uint32_t) ||
1969                     tgt_len != sizeof(uint32_t)) {
1970                     goto unimplemented;
1971                 }
1972                 __put_user(*v, t_int);
1973                 break;
1974             }
1975             case IPV6_RECVERR:
1976             {
1977                 struct errhdr6_t {
1978                    struct sock_extended_err ee;
1979                    struct sockaddr_in6 offender;
1980                 };
1981                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1982                 struct errhdr6_t *target_errh =
1983                     (struct errhdr6_t *)target_data;
1984 
1985                 if (len != sizeof(struct errhdr6_t) ||
1986                     tgt_len != sizeof(struct errhdr6_t)) {
1987                     goto unimplemented;
1988                 }
1989                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1990                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1991                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1992                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1993                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1994                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1995                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1996                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1997                     (void *) &errh->offender, sizeof(errh->offender));
1998                 break;
1999             }
2000             default:
2001                 goto unimplemented;
2002             }
2003             break;
2004 
2005         default:
2006         unimplemented:
2007             gemu_log("Unsupported ancillary data: %d/%d\n",
2008                                         cmsg->cmsg_level, cmsg->cmsg_type);
2009             memcpy(target_data, data, MIN(len, tgt_len));
2010             if (tgt_len > len) {
2011                 memset(target_data + len, 0, tgt_len - len);
2012             }
2013         }
2014 
2015         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2016         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2017         if (msg_controllen < tgt_space) {
2018             tgt_space = msg_controllen;
2019         }
2020         msg_controllen -= tgt_space;
2021         space += tgt_space;
2022         cmsg = CMSG_NXTHDR(msgh, cmsg);
2023         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2024                                          target_cmsg_start);
2025     }
2026     unlock_user(target_cmsg, target_cmsg_addr, space);
2027  the_end:
2028     target_msgh->msg_controllen = tswapal(space);
2029     return 0;
2030 }
2031 
2032 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2033 {
2034     nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2035     nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2036     nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2037     nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2038     nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
2039 }
2040 
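/* Walk a buffer of netlink messages coming from the host: each header is
 * byte-swapped for the guest, NLMSG_NOOP/DONE/ERROR are handled in place,
 * and any other payload is handed to the supplied conversion callback.
 */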
2041 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2042                                               size_t len,
2043                                               abi_long (*host_to_target_nlmsg)
2044                                                        (struct nlmsghdr *))
2045 {
2046     uint32_t nlmsg_len;
2047     abi_long ret;
2048 
2049     while (len > sizeof(struct nlmsghdr)) {
2050 
2051         nlmsg_len = nlh->nlmsg_len;
2052         if (nlmsg_len < sizeof(struct nlmsghdr) ||
2053             nlmsg_len > len) {
2054             break;
2055         }
2056 
2057         switch (nlh->nlmsg_type) {
2058         case NLMSG_DONE:
2059             tswap_nlmsghdr(nlh);
2060             return 0;
2061         case NLMSG_NOOP:
2062             break;
2063         case NLMSG_ERROR:
2064         {
2065             struct nlmsgerr *e = NLMSG_DATA(nlh);
2066             e->error = tswap32(e->error);
2067             tswap_nlmsghdr(&e->msg);
2068             tswap_nlmsghdr(nlh);
2069             return 0;
2070         }
2071         default:
2072             ret = host_to_target_nlmsg(nlh);
2073             if (ret < 0) {
2074                 tswap_nlmsghdr(nlh);
2075                 return ret;
2076             }
2077             break;
2078         }
2079         tswap_nlmsghdr(nlh);
2080         len -= NLMSG_ALIGN(nlmsg_len);
2081         nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2082     }
2083     return 0;
2084 }
2085 
2086 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2087                                               size_t len,
2088                                               abi_long (*target_to_host_nlmsg)
2089                                                        (struct nlmsghdr *))
2090 {
2091     int ret;
2092 
2093     while (len > sizeof(struct nlmsghdr)) {
2094         if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2095             tswap32(nlh->nlmsg_len) > len) {
2096             break;
2097         }
2098         tswap_nlmsghdr(nlh);
2099         switch (nlh->nlmsg_type) {
2100         case NLMSG_DONE:
2101             return 0;
2102         case NLMSG_NOOP:
2103             break;
2104         case NLMSG_ERROR:
2105         {
2106             struct nlmsgerr *e = NLMSG_DATA(nlh);
2107             e->error = tswap32(e->error);
2108             tswap_nlmsghdr(&e->msg);
2109             return 0;
2110         }
2111         default:
2112             ret = target_to_host_nlmsg(nlh);
2113             if (ret < 0) {
2114                 return ret;
2115             }
2116         }
2117         len -= NLMSG_ALIGN(nlh->nlmsg_len);
2118         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2119     }
2120     return 0;
2121 }
2122 
2123 #ifdef CONFIG_RTNETLINK
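/* Generic walkers for rtnetlink attribute chains: they iterate over the
 * struct nlattr / struct rtattr entries, byte-swap each entry's length and
 * type, and dispatch the payload to a type-specific conversion callback.
 */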
2124 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2125                                                size_t len, void *context,
2126                                                abi_long (*host_to_target_nlattr)
2127                                                         (struct nlattr *,
2128                                                          void *context))
2129 {
2130     unsigned short nla_len;
2131     abi_long ret;
2132 
2133     while (len > sizeof(struct nlattr)) {
2134         nla_len = nlattr->nla_len;
2135         if (nla_len < sizeof(struct nlattr) ||
2136             nla_len > len) {
2137             break;
2138         }
2139         ret = host_to_target_nlattr(nlattr, context);
2140         nlattr->nla_len = tswap16(nlattr->nla_len);
2141         nlattr->nla_type = tswap16(nlattr->nla_type);
2142         if (ret < 0) {
2143             return ret;
2144         }
2145         len -= NLA_ALIGN(nla_len);
2146         nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2147     }
2148     return 0;
2149 }
2150 
2151 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2152                                                size_t len,
2153                                                abi_long (*host_to_target_rtattr)
2154                                                         (struct rtattr *))
2155 {
2156     unsigned short rta_len;
2157     abi_long ret;
2158 
2159     while (len > sizeof(struct rtattr)) {
2160         rta_len = rtattr->rta_len;
2161         if (rta_len < sizeof(struct rtattr) ||
2162             rta_len > len) {
2163             break;
2164         }
2165         ret = host_to_target_rtattr(rtattr);
2166         rtattr->rta_len = tswap16(rtattr->rta_len);
2167         rtattr->rta_type = tswap16(rtattr->rta_type);
2168         if (ret < 0) {
2169             return ret;
2170         }
2171         len -= RTA_ALIGN(rta_len);
2172         rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2173     }
2174     return 0;
2175 }
2176 
2177 #define NLA_DATA(nla) ((void *)(((char *)(nla)) + NLA_HDRLEN))
2178 
2179 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2180                                                   void *context)
2181 {
2182     uint16_t *u16;
2183     uint32_t *u32;
2184     uint64_t *u64;
2185 
2186     switch (nlattr->nla_type) {
2187     /* no data */
2188     case QEMU_IFLA_BR_FDB_FLUSH:
2189         break;
2190     /* binary */
2191     case QEMU_IFLA_BR_GROUP_ADDR:
2192         break;
2193     /* uint8_t */
2194     case QEMU_IFLA_BR_VLAN_FILTERING:
2195     case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2196     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2197     case QEMU_IFLA_BR_MCAST_ROUTER:
2198     case QEMU_IFLA_BR_MCAST_SNOOPING:
2199     case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2200     case QEMU_IFLA_BR_MCAST_QUERIER:
2201     case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2202     case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2203     case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2204     case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
2205     case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
2206     case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
2207     case QEMU_IFLA_BR_MCAST_MLD_VERSION:
2208         break;
2209     /* uint16_t */
2210     case QEMU_IFLA_BR_PRIORITY:
2211     case QEMU_IFLA_BR_VLAN_PROTOCOL:
2212     case QEMU_IFLA_BR_GROUP_FWD_MASK:
2213     case QEMU_IFLA_BR_ROOT_PORT:
2214     case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2215         u16 = NLA_DATA(nlattr);
2216         *u16 = tswap16(*u16);
2217         break;
2218     /* uint32_t */
2219     case QEMU_IFLA_BR_FORWARD_DELAY:
2220     case QEMU_IFLA_BR_HELLO_TIME:
2221     case QEMU_IFLA_BR_MAX_AGE:
2222     case QEMU_IFLA_BR_AGEING_TIME:
2223     case QEMU_IFLA_BR_STP_STATE:
2224     case QEMU_IFLA_BR_ROOT_PATH_COST:
2225     case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2226     case QEMU_IFLA_BR_MCAST_HASH_MAX:
2227     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2228     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2229         u32 = NLA_DATA(nlattr);
2230         *u32 = tswap32(*u32);
2231         break;
2232     /* uint64_t */
2233     case QEMU_IFLA_BR_HELLO_TIMER:
2234     case QEMU_IFLA_BR_TCN_TIMER:
2235     case QEMU_IFLA_BR_GC_TIMER:
2236     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2237     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2238     case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2239     case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2240     case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2241     case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2242     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2243         u64 = NLA_DATA(nlattr);
2244         *u64 = tswap64(*u64);
2245         break;
2246     /* ifla_bridge_id: uint8_t[] */
2247     case QEMU_IFLA_BR_ROOT_ID:
2248     case QEMU_IFLA_BR_BRIDGE_ID:
2249         break;
2250     default:
2251         gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2252         break;
2253     }
2254     return 0;
2255 }
2256 
2257 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2258                                                         void *context)
2259 {
2260     uint16_t *u16;
2261     uint32_t *u32;
2262     uint64_t *u64;
2263 
2264     switch (nlattr->nla_type) {
2265     /* uint8_t */
2266     case QEMU_IFLA_BRPORT_STATE:
2267     case QEMU_IFLA_BRPORT_MODE:
2268     case QEMU_IFLA_BRPORT_GUARD:
2269     case QEMU_IFLA_BRPORT_PROTECT:
2270     case QEMU_IFLA_BRPORT_FAST_LEAVE:
2271     case QEMU_IFLA_BRPORT_LEARNING:
2272     case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2273     case QEMU_IFLA_BRPORT_PROXYARP:
2274     case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2275     case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2276     case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2277     case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2278     case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2279     case QEMU_IFLA_BRPORT_MCAST_FLOOD:
2280     case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
2281     case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
2282     case QEMU_IFLA_BRPORT_BCAST_FLOOD:
2283     case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
2284         break;
2285     /* uint16_t */
2286     case QEMU_IFLA_BRPORT_PRIORITY:
2287     case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2288     case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2289     case QEMU_IFLA_BRPORT_ID:
2290     case QEMU_IFLA_BRPORT_NO:
2291     case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
2292         u16 = NLA_DATA(nlattr);
2293         *u16 = tswap16(*u16);
2294         break;
2295     /* uint32_t */
2296     case QEMU_IFLA_BRPORT_COST:
2297         u32 = NLA_DATA(nlattr);
2298         *u32 = tswap32(*u32);
2299         break;
2300     /* uint64_t */
2301     case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2302     case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2303     case QEMU_IFLA_BRPORT_HOLD_TIMER:
2304         u64 = NLA_DATA(nlattr);
2305         *u64 = tswap64(*u64);
2306         break;
2307     /* ifla_bridge_id: uint8_t[] */
2308     case QEMU_IFLA_BRPORT_ROOT_ID:
2309     case QEMU_IFLA_BRPORT_BRIDGE_ID:
2310         break;
2311     default:
2312         gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2313         break;
2314     }
2315     return 0;
2316 }
2317 
2318 struct linkinfo_context {
2319     int len;
2320     char *name;
2321     int slave_len;
2322     char *slave_name;
2323 };
2324 
2325 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2326                                                     void *context)
2327 {
2328     struct linkinfo_context *li_context = context;
2329 
2330     switch (nlattr->nla_type) {
2331     /* string */
2332     case QEMU_IFLA_INFO_KIND:
2333         li_context->name = NLA_DATA(nlattr);
2334         li_context->len = nlattr->nla_len - NLA_HDRLEN;
2335         break;
2336     case QEMU_IFLA_INFO_SLAVE_KIND:
2337         li_context->slave_name = NLA_DATA(nlattr);
2338         li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2339         break;
2340     /* stats */
2341     case QEMU_IFLA_INFO_XSTATS:
2342         /* FIXME: only used by CAN */
2343         break;
2344     /* nested */
2345     case QEMU_IFLA_INFO_DATA:
2346         if (strncmp(li_context->name, "bridge",
2347                     li_context->len) == 0) {
2348             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2349                                                   nlattr->nla_len,
2350                                                   NULL,
2351                                              host_to_target_data_bridge_nlattr);
2352         } else {
2353             gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2354         }
2355         break;
2356     case QEMU_IFLA_INFO_SLAVE_DATA:
2357         if (strncmp(li_context->slave_name, "bridge",
2358                     li_context->slave_len) == 0) {
2359             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2360                                                   nlattr->nla_len,
2361                                                   NULL,
2362                                        host_to_target_slave_data_bridge_nlattr);
2363         } else {
2364             gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2365                      li_context->slave_name);
2366         }
2367         break;
2368     default:
2369         gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2370         break;
2371     }
2372 
2373     return 0;
2374 }
2375 
2376 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2377                                                 void *context)
2378 {
2379     uint32_t *u32;
2380     int i;
2381 
2382     switch (nlattr->nla_type) {
2383     case QEMU_IFLA_INET_CONF:
2384         u32 = NLA_DATA(nlattr);
2385         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2386              i++) {
2387             u32[i] = tswap32(u32[i]);
2388         }
2389         break;
2390     default:
2391         gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2392     }
2393     return 0;
2394 }
2395 
2396 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2397                                                 void *context)
2398 {
2399     uint32_t *u32;
2400     uint64_t *u64;
2401     struct ifla_cacheinfo *ci;
2402     int i;
2403 
2404     switch (nlattr->nla_type) {
2405     /* binary */
2406     case QEMU_IFLA_INET6_TOKEN:
2407         break;
2408     /* uint8_t */
2409     case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2410         break;
2411     /* uint32_t */
2412     case QEMU_IFLA_INET6_FLAGS:
2413         u32 = NLA_DATA(nlattr);
2414         *u32 = tswap32(*u32);
2415         break;
2416     /* uint32_t[] */
2417     case QEMU_IFLA_INET6_CONF:
2418         u32 = NLA_DATA(nlattr);
2419         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2420              i++) {
2421             u32[i] = tswap32(u32[i]);
2422         }
2423         break;
2424     /* ifla_cacheinfo */
2425     case QEMU_IFLA_INET6_CACHEINFO:
2426         ci = NLA_DATA(nlattr);
2427         ci->max_reasm_len = tswap32(ci->max_reasm_len);
2428         ci->tstamp = tswap32(ci->tstamp);
2429         ci->reachable_time = tswap32(ci->reachable_time);
2430         ci->retrans_time = tswap32(ci->retrans_time);
2431         break;
2432     /* uint64_t[] */
2433     case QEMU_IFLA_INET6_STATS:
2434     case QEMU_IFLA_INET6_ICMP6STATS:
2435         u64 = NLA_DATA(nlattr);
2436         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2437              i++) {
2438             u64[i] = tswap64(u64[i]);
2439         }
2440         break;
2441     default:
2442         gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2443     }
2444     return 0;
2445 }
2446 
2447 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2448                                                     void *context)
2449 {
2450     switch (nlattr->nla_type) {
2451     case AF_INET:
2452         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2453                                               NULL,
2454                                              host_to_target_data_inet_nlattr);
2455     case AF_INET6:
2456         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2457                                               NULL,
2458                                              host_to_target_data_inet6_nlattr);
2459     default:
2460         gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2461         break;
2462     }
2463     return 0;
2464 }
2465 
2466 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2467                                                void *context)
2468 {
2469     uint32_t *u32;
2470 
2471     switch (nlattr->nla_type) {
2472     /* uint8_t */
2473     case QEMU_IFLA_XDP_ATTACHED:
2474         break;
2475     /* uint32_t */
2476     case QEMU_IFLA_XDP_PROG_ID:
2477         u32 = NLA_DATA(nlattr);
2478         *u32 = tswap32(*u32);
2479         break;
2480     default:
2481         gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2482         break;
2483     }
2484     return 0;
2485 }
2486 
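/* Byte-swap the payload of a single link-level (RTM_*LINK) attribute for
 * the guest: scalars are swapped in place, the link statistics and ifmap
 * structures are converted field by field, and nested LINKINFO, AF_SPEC
 * and XDP blocks are walked recursively.
 */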
2487 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2488 {
2489     uint32_t *u32;
2490     struct rtnl_link_stats *st;
2491     struct rtnl_link_stats64 *st64;
2492     struct rtnl_link_ifmap *map;
2493     struct linkinfo_context li_context;
2494 
2495     switch (rtattr->rta_type) {
2496     /* binary stream */
2497     case QEMU_IFLA_ADDRESS:
2498     case QEMU_IFLA_BROADCAST:
2499     /* string */
2500     case QEMU_IFLA_IFNAME:
2501     case QEMU_IFLA_QDISC:
2502         break;
2503     /* uint8_t */
2504     case QEMU_IFLA_OPERSTATE:
2505     case QEMU_IFLA_LINKMODE:
2506     case QEMU_IFLA_CARRIER:
2507     case QEMU_IFLA_PROTO_DOWN:
2508         break;
2509     /* uint32_t */
2510     case QEMU_IFLA_MTU:
2511     case QEMU_IFLA_LINK:
2512     case QEMU_IFLA_WEIGHT:
2513     case QEMU_IFLA_TXQLEN:
2514     case QEMU_IFLA_CARRIER_CHANGES:
2515     case QEMU_IFLA_NUM_RX_QUEUES:
2516     case QEMU_IFLA_NUM_TX_QUEUES:
2517     case QEMU_IFLA_PROMISCUITY:
2518     case QEMU_IFLA_EXT_MASK:
2519     case QEMU_IFLA_LINK_NETNSID:
2520     case QEMU_IFLA_GROUP:
2521     case QEMU_IFLA_MASTER:
2522     case QEMU_IFLA_NUM_VF:
2523     case QEMU_IFLA_GSO_MAX_SEGS:
2524     case QEMU_IFLA_GSO_MAX_SIZE:
2525     case QEMU_IFLA_CARRIER_UP_COUNT:
2526     case QEMU_IFLA_CARRIER_DOWN_COUNT:
2527         u32 = RTA_DATA(rtattr);
2528         *u32 = tswap32(*u32);
2529         break;
2530     /* struct rtnl_link_stats */
2531     case QEMU_IFLA_STATS:
2532         st = RTA_DATA(rtattr);
2533         st->rx_packets = tswap32(st->rx_packets);
2534         st->tx_packets = tswap32(st->tx_packets);
2535         st->rx_bytes = tswap32(st->rx_bytes);
2536         st->tx_bytes = tswap32(st->tx_bytes);
2537         st->rx_errors = tswap32(st->rx_errors);
2538         st->tx_errors = tswap32(st->tx_errors);
2539         st->rx_dropped = tswap32(st->rx_dropped);
2540         st->tx_dropped = tswap32(st->tx_dropped);
2541         st->multicast = tswap32(st->multicast);
2542         st->collisions = tswap32(st->collisions);
2543 
2544         /* detailed rx_errors: */
2545         st->rx_length_errors = tswap32(st->rx_length_errors);
2546         st->rx_over_errors = tswap32(st->rx_over_errors);
2547         st->rx_crc_errors = tswap32(st->rx_crc_errors);
2548         st->rx_frame_errors = tswap32(st->rx_frame_errors);
2549         st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2550         st->rx_missed_errors = tswap32(st->rx_missed_errors);
2551 
2552         /* detailed tx_errors */
2553         st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2554         st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2555         st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2556         st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2557         st->tx_window_errors = tswap32(st->tx_window_errors);
2558 
2559         /* for cslip etc */
2560         st->rx_compressed = tswap32(st->rx_compressed);
2561         st->tx_compressed = tswap32(st->tx_compressed);
2562         break;
2563     /* struct rtnl_link_stats64 */
2564     case QEMU_IFLA_STATS64:
2565         st64 = RTA_DATA(rtattr);
2566         st64->rx_packets = tswap64(st64->rx_packets);
2567         st64->tx_packets = tswap64(st64->tx_packets);
2568         st64->rx_bytes = tswap64(st64->rx_bytes);
2569         st64->tx_bytes = tswap64(st64->tx_bytes);
2570         st64->rx_errors = tswap64(st64->rx_errors);
2571         st64->tx_errors = tswap64(st64->tx_errors);
2572         st64->rx_dropped = tswap64(st64->rx_dropped);
2573         st64->tx_dropped = tswap64(st64->tx_dropped);
2574         st64->multicast = tswap64(st64->multicast);
2575         st64->collisions = tswap64(st64->collisions);
2576 
2577         /* detailed rx_errors: */
2578         st64->rx_length_errors = tswap64(st64->rx_length_errors);
2579         st64->rx_over_errors = tswap64(st64->rx_over_errors);
2580         st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2581         st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2582         st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2583         st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2584 
2585         /* detailed tx_errors */
2586         st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2587         st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2588         st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2589         st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2590         st64->tx_window_errors = tswap64(st64->tx_window_errors);
2591 
2592         /* for cslip etc */
2593         st64->rx_compressed = tswap64(st64->rx_compressed);
2594         st64->tx_compressed = tswap64(st64->tx_compressed);
2595         break;
2596     /* struct rtnl_link_ifmap */
2597     case QEMU_IFLA_MAP:
2598         map = RTA_DATA(rtattr);
2599         map->mem_start = tswap64(map->mem_start);
2600         map->mem_end = tswap64(map->mem_end);
2601         map->base_addr = tswap64(map->base_addr);
2602         map->irq = tswap16(map->irq);
2603         break;
2604     /* nested */
2605     case QEMU_IFLA_LINKINFO:
2606         memset(&li_context, 0, sizeof(li_context));
2607         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2608                                               &li_context,
2609                                            host_to_target_data_linkinfo_nlattr);
2610     case QEMU_IFLA_AF_SPEC:
2611         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2612                                               NULL,
2613                                              host_to_target_data_spec_nlattr);
2614     case QEMU_IFLA_XDP:
2615         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2616                                               NULL,
2617                                                 host_to_target_data_xdp_nlattr);
2618     default:
2619         gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2620         break;
2621     }
2622     return 0;
2623 }
2624 
2625 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2626 {
2627     uint32_t *u32;
2628     struct ifa_cacheinfo *ci;
2629 
2630     switch (rtattr->rta_type) {
2631     /* binary: depends on family type */
2632     case IFA_ADDRESS:
2633     case IFA_LOCAL:
2634         break;
2635     /* string */
2636     case IFA_LABEL:
2637         break;
2638     /* u32 */
2639     case IFA_FLAGS:
2640     case IFA_BROADCAST:
2641         u32 = RTA_DATA(rtattr);
2642         *u32 = tswap32(*u32);
2643         break;
2644     /* struct ifa_cacheinfo */
2645     case IFA_CACHEINFO:
2646         ci = RTA_DATA(rtattr);
2647         ci->ifa_prefered = tswap32(ci->ifa_prefered);
2648         ci->ifa_valid = tswap32(ci->ifa_valid);
2649         ci->cstamp = tswap32(ci->cstamp);
2650         ci->tstamp = tswap32(ci->tstamp);
2651         break;
2652     default:
2653         gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2654         break;
2655     }
2656     return 0;
2657 }
2658 
2659 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2660 {
2661     uint32_t *u32;
2662     switch (rtattr->rta_type) {
2663     /* binary: depends on family type */
2664     case RTA_GATEWAY:
2665     case RTA_DST:
2666     case RTA_PREFSRC:
2667         break;
2668     /* u32 */
2669     case RTA_PRIORITY:
2670     case RTA_TABLE:
2671     case RTA_OIF:
2672         u32 = RTA_DATA(rtattr);
2673         *u32 = tswap32(*u32);
2674         break;
2675     default:
2676         gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2677         break;
2678     }
2679     return 0;
2680 }
2681 
2682 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2683                                          uint32_t rtattr_len)
2684 {
2685     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2686                                           host_to_target_data_link_rtattr);
2687 }
2688 
2689 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2690                                          uint32_t rtattr_len)
2691 {
2692     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2693                                           host_to_target_data_addr_rtattr);
2694 }
2695 
2696 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2697                                          uint32_t rtattr_len)
2698 {
2699     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2700                                           host_to_target_data_route_rtattr);
2701 }
2702 
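/* Convert one NETLINK_ROUTE message for the guest: byte-swap the fixed
 * ifinfomsg/ifaddrmsg/rtmsg header, then convert the attached rtattr chain
 * with the helpers above.
 */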
2703 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2704 {
2705     uint32_t nlmsg_len;
2706     struct ifinfomsg *ifi;
2707     struct ifaddrmsg *ifa;
2708     struct rtmsg *rtm;
2709 
2710     nlmsg_len = nlh->nlmsg_len;
2711     switch (nlh->nlmsg_type) {
2712     case RTM_NEWLINK:
2713     case RTM_DELLINK:
2714     case RTM_GETLINK:
2715         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2716             ifi = NLMSG_DATA(nlh);
2717             ifi->ifi_type = tswap16(ifi->ifi_type);
2718             ifi->ifi_index = tswap32(ifi->ifi_index);
2719             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2720             ifi->ifi_change = tswap32(ifi->ifi_change);
2721             host_to_target_link_rtattr(IFLA_RTA(ifi),
2722                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2723         }
2724         break;
2725     case RTM_NEWADDR:
2726     case RTM_DELADDR:
2727     case RTM_GETADDR:
2728         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2729             ifa = NLMSG_DATA(nlh);
2730             ifa->ifa_index = tswap32(ifa->ifa_index);
2731             host_to_target_addr_rtattr(IFA_RTA(ifa),
2732                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2733         }
2734         break;
2735     case RTM_NEWROUTE:
2736     case RTM_DELROUTE:
2737     case RTM_GETROUTE:
2738         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2739             rtm = NLMSG_DATA(nlh);
2740             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2741             host_to_target_route_rtattr(RTM_RTA(rtm),
2742                                         nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2743         }
2744         break;
2745     default:
2746         return -TARGET_EINVAL;
2747     }
2748     return 0;
2749 }
2750 
2751 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2752                                                   size_t len)
2753 {
2754     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2755 }
2756 
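/* Guest-to-host direction: swap each rtattr header to host byte order
 * before handing its payload to the conversion callback.
 */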
2757 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2758                                                size_t len,
2759                                                abi_long (*target_to_host_rtattr)
2760                                                         (struct rtattr *))
2761 {
2762     abi_long ret;
2763 
2764     while (len >= sizeof(struct rtattr)) {
2765         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2766             tswap16(rtattr->rta_len) > len) {
2767             break;
2768         }
2769         rtattr->rta_len = tswap16(rtattr->rta_len);
2770         rtattr->rta_type = tswap16(rtattr->rta_type);
2771         ret = target_to_host_rtattr(rtattr);
2772         if (ret < 0) {
2773             return ret;
2774         }
2775         len -= RTA_ALIGN(rtattr->rta_len);
2776         rtattr = (struct rtattr *)(((char *)rtattr) +
2777                  RTA_ALIGN(rtattr->rta_len));
2778     }
2779     return 0;
2780 }
2781 
2782 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2783 {
2784     switch (rtattr->rta_type) {
2785     default:
2786         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2787         break;
2788     }
2789     return 0;
2790 }
2791 
2792 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2793 {
2794     switch (rtattr->rta_type) {
2795     /* binary: depends on family type */
2796     case IFA_LOCAL:
2797     case IFA_ADDRESS:
2798         break;
2799     default:
2800         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2801         break;
2802     }
2803     return 0;
2804 }
2805 
2806 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2807 {
2808     uint32_t *u32;
2809     switch (rtattr->rta_type) {
2810     /* binary: depends on family type */
2811     case RTA_DST:
2812     case RTA_SRC:
2813     case RTA_GATEWAY:
2814         break;
2815     /* u32 */
2816     case RTA_PRIORITY:
2817     case RTA_OIF:
2818         u32 = RTA_DATA(rtattr);
2819         *u32 = tswap32(*u32);
2820         break;
2821     default:
2822         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2823         break;
2824     }
2825     return 0;
2826 }
2827 
2828 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2829                                        uint32_t rtattr_len)
2830 {
2831     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2832                                    target_to_host_data_link_rtattr);
2833 }
2834 
2835 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2836                                      uint32_t rtattr_len)
2837 {
2838     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2839                                    target_to_host_data_addr_rtattr);
2840 }
2841 
2842 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2843                                      uint32_t rtattr_len)
2844 {
2845     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2846                                    target_to_host_data_route_rtattr);
2847 }
2848 
2849 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2850 {
2851     struct ifinfomsg *ifi;
2852     struct ifaddrmsg *ifa;
2853     struct rtmsg *rtm;
2854 
2855     switch (nlh->nlmsg_type) {
2856     case RTM_GETLINK:
2857         break;
2858     case RTM_NEWLINK:
2859     case RTM_DELLINK:
2860         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2861             ifi = NLMSG_DATA(nlh);
2862             ifi->ifi_type = tswap16(ifi->ifi_type);
2863             ifi->ifi_index = tswap32(ifi->ifi_index);
2864             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2865             ifi->ifi_change = tswap32(ifi->ifi_change);
2866             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2867                                        NLMSG_LENGTH(sizeof(*ifi)));
2868         }
2869         break;
2870     case RTM_GETADDR:
2871     case RTM_NEWADDR:
2872     case RTM_DELADDR:
2873         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2874             ifa = NLMSG_DATA(nlh);
2875             ifa->ifa_index = tswap32(ifa->ifa_index);
2876             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2877                                        NLMSG_LENGTH(sizeof(*ifa)));
2878         }
2879         break;
2880     case RTM_GETROUTE:
2881         break;
2882     case RTM_NEWROUTE:
2883     case RTM_DELROUTE:
2884         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2885             rtm = NLMSG_DATA(nlh);
2886             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2887             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2888                                         NLMSG_LENGTH(sizeof(*rtm)));
2889         }
2890         break;
2891     default:
2892         return -TARGET_EOPNOTSUPP;
2893     }
2894     return 0;
2895 }
2896 
2897 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2898 {
2899     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2900 }
2901 #endif /* CONFIG_RTNETLINK */
2902 
2903 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2904 {
2905     switch (nlh->nlmsg_type) {
2906     default:
2907         gemu_log("Unknown host audit message type %d\n",
2908                  nlh->nlmsg_type);
2909         return -TARGET_EINVAL;
2910     }
2911     return 0;
2912 }
2913 
2914 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2915                                                   size_t len)
2916 {
2917     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2918 }
2919 
2920 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2921 {
2922     switch (nlh->nlmsg_type) {
2923     case AUDIT_USER:
2924     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2925     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2926         break;
2927     default:
2928         gemu_log("Unknown target audit message type %d\n",
2929                  nlh->nlmsg_type);
2930         return -TARGET_EINVAL;
2931     }
2932 
2933     return 0;
2934 }
2935 
2936 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2937 {
2938     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2939 }
2940 
2941 /* do_setsockopt() must return target values and target errnos. */
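/* Option values are fetched from guest memory and, where the guest layout
 * differs from the host one (multicast requests, socket filters, timeouts,
 * ...), converted before the host setsockopt() is called.
 */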
2942 static abi_long do_setsockopt(int sockfd, int level, int optname,
2943                               abi_ulong optval_addr, socklen_t optlen)
2944 {
2945     abi_long ret;
2946     int val;
2947     struct ip_mreqn *ip_mreq;
2948     struct ip_mreq_source *ip_mreq_source;
2949 
2950     switch(level) {
2951     case SOL_TCP:
2952         /* TCP options all take an 'int' value.  */
2953         if (optlen < sizeof(uint32_t))
2954             return -TARGET_EINVAL;
2955 
2956         if (get_user_u32(val, optval_addr))
2957             return -TARGET_EFAULT;
2958         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2959         break;
2960     case SOL_IP:
2961         switch(optname) {
2962         case IP_TOS:
2963         case IP_TTL:
2964         case IP_HDRINCL:
2965         case IP_ROUTER_ALERT:
2966         case IP_RECVOPTS:
2967         case IP_RETOPTS:
2968         case IP_PKTINFO:
2969         case IP_MTU_DISCOVER:
2970         case IP_RECVERR:
2971         case IP_RECVTTL:
2972         case IP_RECVTOS:
2973 #ifdef IP_FREEBIND
2974         case IP_FREEBIND:
2975 #endif
2976         case IP_MULTICAST_TTL:
2977         case IP_MULTICAST_LOOP:
2978             val = 0;
2979             if (optlen >= sizeof(uint32_t)) {
2980                 if (get_user_u32(val, optval_addr))
2981                     return -TARGET_EFAULT;
2982             } else if (optlen >= 1) {
2983                 if (get_user_u8(val, optval_addr))
2984                     return -TARGET_EFAULT;
2985             }
2986             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2987             break;
2988         case IP_ADD_MEMBERSHIP:
2989         case IP_DROP_MEMBERSHIP:
2990             if (optlen < sizeof (struct target_ip_mreq) ||
2991                 optlen > sizeof (struct target_ip_mreqn))
2992                 return -TARGET_EINVAL;
2993 
2994             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2995             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2996             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2997             break;
2998 
2999         case IP_BLOCK_SOURCE:
3000         case IP_UNBLOCK_SOURCE:
3001         case IP_ADD_SOURCE_MEMBERSHIP:
3002         case IP_DROP_SOURCE_MEMBERSHIP:
3003             if (optlen != sizeof (struct target_ip_mreq_source))
3004                 return -TARGET_EINVAL;
3005 
3006             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
3007             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3008             unlock_user(ip_mreq_source, optval_addr, 0);
3009             break;
3010 
3011         default:
3012             goto unimplemented;
3013         }
3014         break;
3015     case SOL_IPV6:
3016         switch (optname) {
3017         case IPV6_MTU_DISCOVER:
3018         case IPV6_MTU:
3019         case IPV6_V6ONLY:
3020         case IPV6_RECVPKTINFO:
3021         case IPV6_UNICAST_HOPS:
3022         case IPV6_MULTICAST_HOPS:
3023         case IPV6_MULTICAST_LOOP:
3024         case IPV6_RECVERR:
3025         case IPV6_RECVHOPLIMIT:
3026         case IPV6_2292HOPLIMIT:
3027         case IPV6_CHECKSUM:
3028             val = 0;
3029             if (optlen < sizeof(uint32_t)) {
3030                 return -TARGET_EINVAL;
3031             }
3032             if (get_user_u32(val, optval_addr)) {
3033                 return -TARGET_EFAULT;
3034             }
3035             ret = get_errno(setsockopt(sockfd, level, optname,
3036                                        &val, sizeof(val)));
3037             break;
3038         case IPV6_PKTINFO:
3039         {
3040             struct in6_pktinfo pki;
3041 
3042             if (optlen < sizeof(pki)) {
3043                 return -TARGET_EINVAL;
3044             }
3045 
3046             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3047                 return -TARGET_EFAULT;
3048             }
3049 
3050             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3051 
3052             ret = get_errno(setsockopt(sockfd, level, optname,
3053                                        &pki, sizeof(pki)));
3054             break;
3055         }
3056         default:
3057             goto unimplemented;
3058         }
3059         break;
3060     case SOL_ICMPV6:
3061         switch (optname) {
3062         case ICMPV6_FILTER:
3063         {
3064             struct icmp6_filter icmp6f;
3065 
3066             if (optlen > sizeof(icmp6f)) {
3067                 optlen = sizeof(icmp6f);
3068             }
3069 
3070             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3071                 return -TARGET_EFAULT;
3072             }
3073 
3074             for (val = 0; val < 8; val++) {
3075                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3076             }
3077 
3078             ret = get_errno(setsockopt(sockfd, level, optname,
3079                                        &icmp6f, optlen));
3080             break;
3081         }
3082         default:
3083             goto unimplemented;
3084         }
3085         break;
3086     case SOL_RAW:
3087         switch (optname) {
3088         case ICMP_FILTER:
3089         case IPV6_CHECKSUM:
3090             /* these take a u32 value */
3091             if (optlen < sizeof(uint32_t)) {
3092                 return -TARGET_EINVAL;
3093             }
3094 
3095             if (get_user_u32(val, optval_addr)) {
3096                 return -TARGET_EFAULT;
3097             }
3098             ret = get_errno(setsockopt(sockfd, level, optname,
3099                                        &val, sizeof(val)));
3100             break;
3101 
3102         default:
3103             goto unimplemented;
3104         }
3105         break;
3106     case TARGET_SOL_SOCKET:
3107         switch (optname) {
3108         case TARGET_SO_RCVTIMEO:
3109         {
3110                 struct timeval tv;
3111 
3112                 optname = SO_RCVTIMEO;
3113 
3114 set_timeout:
3115                 if (optlen != sizeof(struct target_timeval)) {
3116                     return -TARGET_EINVAL;
3117                 }
3118 
3119                 if (copy_from_user_timeval(&tv, optval_addr)) {
3120                     return -TARGET_EFAULT;
3121                 }
3122 
3123                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3124                                 &tv, sizeof(tv)));
3125                 return ret;
3126         }
3127         case TARGET_SO_SNDTIMEO:
3128                 optname = SO_SNDTIMEO;
3129                 goto set_timeout;
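             /* The guest hands us a struct target_sock_fprog whose fields, and the
              * classic BPF program it points to, are in target byte order; the code
              * below copies the filter element by element, byte-swapping the 16-bit
              * and 32-bit fields, before attaching it with SO_ATTACH_FILTER. */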
3130         case TARGET_SO_ATTACH_FILTER:
3131         {
3132                 struct target_sock_fprog *tfprog;
3133                 struct target_sock_filter *tfilter;
3134                 struct sock_fprog fprog;
3135                 struct sock_filter *filter;
3136                 int i;
3137 
3138                 if (optlen != sizeof(*tfprog)) {
3139                     return -TARGET_EINVAL;
3140                 }
3141                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3142                     return -TARGET_EFAULT;
3143                 }
3144                 if (!lock_user_struct(VERIFY_READ, tfilter,
3145                                       tswapal(tfprog->filter), 0)) {
3146                     unlock_user_struct(tfprog, optval_addr, 1);
3147                     return -TARGET_EFAULT;
3148                 }
3149 
3150                 fprog.len = tswap16(tfprog->len);
3151                 filter = g_try_new(struct sock_filter, fprog.len);
3152                 if (filter == NULL) {
3153                     unlock_user_struct(tfilter, tfprog->filter, 1);
3154                     unlock_user_struct(tfprog, optval_addr, 1);
3155                     return -TARGET_ENOMEM;
3156                 }
3157                 for (i = 0; i < fprog.len; i++) {
3158                     filter[i].code = tswap16(tfilter[i].code);
3159                     filter[i].jt = tfilter[i].jt;
3160                     filter[i].jf = tfilter[i].jf;
3161                     filter[i].k = tswap32(tfilter[i].k);
3162                 }
3163                 fprog.filter = filter;
3164 
3165                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3166                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3167                 g_free(filter);
3168 
3169                 unlock_user_struct(tfilter, tfprog->filter, 1);
3170                 unlock_user_struct(tfprog, optval_addr, 1);
3171                 return ret;
3172         }
3173         case TARGET_SO_BINDTODEVICE:
3174         {
3175                 char *dev_ifname, *addr_ifname;
3176 
3177                 if (optlen > IFNAMSIZ - 1) {
3178                     optlen = IFNAMSIZ - 1;
3179                 }
3180                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3181                 if (!dev_ifname) {
3182                     return -TARGET_EFAULT;
3183                 }
3184                 optname = SO_BINDTODEVICE;
3185                 addr_ifname = alloca(IFNAMSIZ);
3186                 memcpy(addr_ifname, dev_ifname, optlen);
3187                 addr_ifname[optlen] = 0;
3188                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3189                                            addr_ifname, optlen));
3190                 unlock_user(dev_ifname, optval_addr, 0);
3191                 return ret;
3192         }
3193             /* Options with 'int' argument.  */
3194         case TARGET_SO_DEBUG:
3195 		optname = SO_DEBUG;
3196 		break;
3197         case TARGET_SO_REUSEADDR:
3198 		optname = SO_REUSEADDR;
3199 		break;
3200         case TARGET_SO_TYPE:
3201 		optname = SO_TYPE;
3202 		break;
3203         case TARGET_SO_ERROR:
3204 		optname = SO_ERROR;
3205 		break;
3206         case TARGET_SO_DONTROUTE:
3207 		optname = SO_DONTROUTE;
3208 		break;
3209         case TARGET_SO_BROADCAST:
3210 		optname = SO_BROADCAST;
3211 		break;
3212         case TARGET_SO_SNDBUF:
3213 		optname = SO_SNDBUF;
3214 		break;
3215         case TARGET_SO_SNDBUFFORCE:
3216                 optname = SO_SNDBUFFORCE;
3217                 break;
3218         case TARGET_SO_RCVBUF:
3219 		optname = SO_RCVBUF;
3220 		break;
3221         case TARGET_SO_RCVBUFFORCE:
3222                 optname = SO_RCVBUFFORCE;
3223                 break;
3224         case TARGET_SO_KEEPALIVE:
3225 		optname = SO_KEEPALIVE;
3226 		break;
3227         case TARGET_SO_OOBINLINE:
3228 		optname = SO_OOBINLINE;
3229 		break;
3230         case TARGET_SO_NO_CHECK:
3231 		optname = SO_NO_CHECK;
3232 		break;
3233         case TARGET_SO_PRIORITY:
3234 		optname = SO_PRIORITY;
3235 		break;
3236 #ifdef SO_BSDCOMPAT
3237         case TARGET_SO_BSDCOMPAT:
3238 		optname = SO_BSDCOMPAT;
3239 		break;
3240 #endif
3241         case TARGET_SO_PASSCRED:
3242 		optname = SO_PASSCRED;
3243 		break;
3244         case TARGET_SO_PASSSEC:
3245                 optname = SO_PASSSEC;
3246                 break;
3247         case TARGET_SO_TIMESTAMP:
3248 		optname = SO_TIMESTAMP;
3249 		break;
3250         case TARGET_SO_RCVLOWAT:
3251 		optname = SO_RCVLOWAT;
3252 		break;
3253         default:
3254             goto unimplemented;
3255         }
3256         if (optlen < sizeof(uint32_t))
3257             return -TARGET_EINVAL;
3258 
3259         if (get_user_u32(val, optval_addr))
3260             return -TARGET_EFAULT;
3261         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3262         break;
3263     default:
3264     unimplemented:
3265         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3266         ret = -TARGET_ENOPROTOOPT;
3267     }
3268     return ret;
3269 }
3270 
3271 /* do_getsockopt() Must return target values and target errnos. */
3272 static abi_long do_getsockopt(int sockfd, int level, int optname,
3273                               abi_ulong optval_addr, abi_ulong optlen)
3274 {
3275     abi_long ret;
3276     int len, val;
3277     socklen_t lv;
3278 
3279     switch(level) {
3280     case TARGET_SOL_SOCKET:
3281         level = SOL_SOCKET;
3282         switch (optname) {
3283         /* These don't just return a single integer */
3284         case TARGET_SO_LINGER:
3285         case TARGET_SO_RCVTIMEO:
3286         case TARGET_SO_SNDTIMEO:
3287         case TARGET_SO_PEERNAME:
3288             goto unimplemented;
3289         case TARGET_SO_PEERCRED: {
3290             struct ucred cr;
3291             socklen_t crlen;
3292             struct target_ucred *tcr;
3293 
3294             if (get_user_u32(len, optlen)) {
3295                 return -TARGET_EFAULT;
3296             }
3297             if (len < 0) {
3298                 return -TARGET_EINVAL;
3299             }
3300 
3301             crlen = sizeof(cr);
3302             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3303                                        &cr, &crlen));
3304             if (ret < 0) {
3305                 return ret;
3306             }
3307             if (len > crlen) {
3308                 len = crlen;
3309             }
3310             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3311                 return -TARGET_EFAULT;
3312             }
3313             __put_user(cr.pid, &tcr->pid);
3314             __put_user(cr.uid, &tcr->uid);
3315             __put_user(cr.gid, &tcr->gid);
3316             unlock_user_struct(tcr, optval_addr, 1);
3317             if (put_user_u32(len, optlen)) {
3318                 return -TARGET_EFAULT;
3319             }
3320             break;
3321         }
3322         /* Options with 'int' argument.  */
3323         case TARGET_SO_DEBUG:
3324             optname = SO_DEBUG;
3325             goto int_case;
3326         case TARGET_SO_REUSEADDR:
3327             optname = SO_REUSEADDR;
3328             goto int_case;
3329         case TARGET_SO_TYPE:
3330             optname = SO_TYPE;
3331             goto int_case;
3332         case TARGET_SO_ERROR:
3333             optname = SO_ERROR;
3334             goto int_case;
3335         case TARGET_SO_DONTROUTE:
3336             optname = SO_DONTROUTE;
3337             goto int_case;
3338         case TARGET_SO_BROADCAST:
3339             optname = SO_BROADCAST;
3340             goto int_case;
3341         case TARGET_SO_SNDBUF:
3342             optname = SO_SNDBUF;
3343             goto int_case;
3344         case TARGET_SO_RCVBUF:
3345             optname = SO_RCVBUF;
3346             goto int_case;
3347         case TARGET_SO_KEEPALIVE:
3348             optname = SO_KEEPALIVE;
3349             goto int_case;
3350         case TARGET_SO_OOBINLINE:
3351             optname = SO_OOBINLINE;
3352             goto int_case;
3353         case TARGET_SO_NO_CHECK:
3354             optname = SO_NO_CHECK;
3355             goto int_case;
3356         case TARGET_SO_PRIORITY:
3357             optname = SO_PRIORITY;
3358             goto int_case;
3359 #ifdef SO_BSDCOMPAT
3360         case TARGET_SO_BSDCOMPAT:
3361             optname = SO_BSDCOMPAT;
3362             goto int_case;
3363 #endif
3364         case TARGET_SO_PASSCRED:
3365             optname = SO_PASSCRED;
3366             goto int_case;
3367         case TARGET_SO_TIMESTAMP:
3368             optname = SO_TIMESTAMP;
3369             goto int_case;
3370         case TARGET_SO_RCVLOWAT:
3371             optname = SO_RCVLOWAT;
3372             goto int_case;
3373         case TARGET_SO_ACCEPTCONN:
3374             optname = SO_ACCEPTCONN;
3375             goto int_case;
3376         default:
3377             goto int_case;
3378         }
3379         break;
3380     case SOL_TCP:
3381         /* TCP options all take an 'int' value.  */
3382     int_case:
3383         if (get_user_u32(len, optlen))
3384             return -TARGET_EFAULT;
3385         if (len < 0)
3386             return -TARGET_EINVAL;
3387         lv = sizeof(lv);
3388         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3389         if (ret < 0)
3390             return ret;
3391         if (optname == SO_TYPE) {
3392             val = host_to_target_sock_type(val);
3393         }
3394         if (len > lv)
3395             len = lv;
3396         if (len == 4) {
3397             if (put_user_u32(val, optval_addr))
3398                 return -TARGET_EFAULT;
3399         } else {
3400             if (put_user_u8(val, optval_addr))
3401                 return -TARGET_EFAULT;
3402         }
3403         if (put_user_u32(len, optlen))
3404             return -TARGET_EFAULT;
3405         break;
3406     case SOL_IP:
3407         switch(optname) {
3408         case IP_TOS:
3409         case IP_TTL:
3410         case IP_HDRINCL:
3411         case IP_ROUTER_ALERT:
3412         case IP_RECVOPTS:
3413         case IP_RETOPTS:
3414         case IP_PKTINFO:
3415         case IP_MTU_DISCOVER:
3416         case IP_RECVERR:
3417         case IP_RECVTOS:
3418 #ifdef IP_FREEBIND
3419         case IP_FREEBIND:
3420 #endif
3421         case IP_MULTICAST_TTL:
3422         case IP_MULTICAST_LOOP:
3423             if (get_user_u32(len, optlen))
3424                 return -TARGET_EFAULT;
3425             if (len < 0)
3426                 return -TARGET_EINVAL;
3427             lv = sizeof(lv);
3428             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3429             if (ret < 0)
3430                 return ret;
3431             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3432                 len = 1;
3433                 if (put_user_u32(len, optlen)
3434                     || put_user_u8(val, optval_addr))
3435                     return -TARGET_EFAULT;
3436             } else {
3437                 if (len > sizeof(int))
3438                     len = sizeof(int);
3439                 if (put_user_u32(len, optlen)
3440                     || put_user_u32(val, optval_addr))
3441                     return -TARGET_EFAULT;
3442             }
3443             break;
3444         default:
3445             ret = -TARGET_ENOPROTOOPT;
3446             break;
3447         }
3448         break;
3449     default:
3450     unimplemented:
3451         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3452                  level, optname);
3453         ret = -TARGET_EOPNOTSUPP;
3454         break;
3455     }
3456     return ret;
3457 }
3458 
3459 /* Convert target low/high pair representing file offset into the host
3460  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3461  * as the kernel doesn't handle them either.
3462  */
3463 static void target_to_host_low_high(abi_ulong tlow,
3464                                     abi_ulong thigh,
3465                                     unsigned long *hlow,
3466                                     unsigned long *hhigh)
3467 {
3468     uint64_t off = tlow |
3469         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3470         TARGET_LONG_BITS / 2;
3471 
3472     *hlow = off;
3473     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3474 }
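
     /* For instance, a 32-bit guest (TARGET_LONG_BITS == 32) passing
      * tlow = 0x89abcdef and thigh = 0x01234567 yields the 64-bit offset
      * 0x0123456789abcdef: on a 64-bit host it lands entirely in *hlow and *hhigh
      * becomes 0, while on a 32-bit host *hlow/*hhigh get the low/high words back.
      * Shifting twice by half the width, rather than once by the full width, keeps
      * the expression well defined even when TARGET_LONG_BITS or HOST_LONG_BITS
      * is 64. */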
3475 
3476 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3477                                 abi_ulong count, int copy)
3478 {
3479     struct target_iovec *target_vec;
3480     struct iovec *vec;
3481     abi_ulong total_len, max_len;
3482     int i;
3483     int err = 0;
3484     bool bad_address = false;
3485 
3486     if (count == 0) {
3487         errno = 0;
3488         return NULL;
3489     }
3490     if (count > IOV_MAX) {
3491         errno = EINVAL;
3492         return NULL;
3493     }
3494 
3495     vec = g_try_new0(struct iovec, count);
3496     if (vec == NULL) {
3497         errno = ENOMEM;
3498         return NULL;
3499     }
3500 
3501     target_vec = lock_user(VERIFY_READ, target_addr,
3502                            count * sizeof(struct target_iovec), 1);
3503     if (target_vec == NULL) {
3504         err = EFAULT;
3505         goto fail2;
3506     }
3507 
3508     /* ??? If host page size > target page size, this will result in a
3509        value larger than what we can actually support.  */
3510     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3511     total_len = 0;
3512 
3513     for (i = 0; i < count; i++) {
3514         abi_ulong base = tswapal(target_vec[i].iov_base);
3515         abi_long len = tswapal(target_vec[i].iov_len);
3516 
3517         if (len < 0) {
3518             err = EINVAL;
3519             goto fail;
3520         } else if (len == 0) {
3521             /* Zero length pointer is ignored.  */
3522             vec[i].iov_base = 0;
3523         } else {
3524             vec[i].iov_base = lock_user(type, base, len, copy);
3525             /* If the first buffer pointer is bad, this is a fault.  But
3526              * subsequent bad buffers will result in a partial write; this
3527              * is realized by filling the vector with null pointers and
3528              * zero lengths. */
3529             if (!vec[i].iov_base) {
3530                 if (i == 0) {
3531                     err = EFAULT;
3532                     goto fail;
3533                 } else {
3534                     bad_address = true;
3535                 }
3536             }
3537             if (bad_address) {
3538                 len = 0;
3539             }
3540             if (len > max_len - total_len) {
3541                 len = max_len - total_len;
3542             }
3543         }
3544         vec[i].iov_len = len;
3545         total_len += len;
3546     }
3547 
3548     unlock_user(target_vec, target_addr, 0);
3549     return vec;
3550 
3551  fail:
3552     while (--i >= 0) {
3553         if (tswapal(target_vec[i].iov_len) > 0) {
3554             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3555         }
3556     }
3557     unlock_user(target_vec, target_addr, 0);
3558  fail2:
3559     g_free(vec);
3560     errno = err;
3561     return NULL;
3562 }
3563 
3564 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3565                          abi_ulong count, int copy)
3566 {
3567     struct target_iovec *target_vec;
3568     int i;
3569 
3570     target_vec = lock_user(VERIFY_READ, target_addr,
3571                            count * sizeof(struct target_iovec), 1);
3572     if (target_vec) {
3573         for (i = 0; i < count; i++) {
3574             abi_ulong base = tswapal(target_vec[i].iov_base);
3575             abi_long len = tswapal(target_vec[i].iov_len);
3576             if (len < 0) {
3577                 break;
3578             }
3579             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3580         }
3581         unlock_user(target_vec, target_addr, 0);
3582     }
3583 
3584     g_free(vec);
3585 }
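
     /* Typical usage (see do_sendrecvmsg_locked() further down): lock the guest
      * iovec, do the host I/O, then pass the same target address and count back
      * so each buffer is unlocked (and copied back to the guest when copy != 0).
      * Roughly:
      *
      *     vec = lock_iovec(VERIFY_WRITE, target_vec, count, 0);
      *     if (vec == NULL) {
      *         return -host_to_target_errno(errno);  // errno set by lock_iovec()
      *     }
      *     ret = get_errno(safe_readv(fd, vec, count));  // illustrative host I/O
      *     unlock_iovec(vec, target_vec, count, 1);
      *
      * A zero count returns NULL with errno == 0, and on failure lock_iovec()
      * has already unwound anything it had locked. */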
3586 
3587 static inline int target_to_host_sock_type(int *type)
3588 {
3589     int host_type = 0;
3590     int target_type = *type;
3591 
3592     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3593     case TARGET_SOCK_DGRAM:
3594         host_type = SOCK_DGRAM;
3595         break;
3596     case TARGET_SOCK_STREAM:
3597         host_type = SOCK_STREAM;
3598         break;
3599     default:
3600         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3601         break;
3602     }
3603     if (target_type & TARGET_SOCK_CLOEXEC) {
3604 #if defined(SOCK_CLOEXEC)
3605         host_type |= SOCK_CLOEXEC;
3606 #else
3607         return -TARGET_EINVAL;
3608 #endif
3609     }
3610     if (target_type & TARGET_SOCK_NONBLOCK) {
3611 #if defined(SOCK_NONBLOCK)
3612         host_type |= SOCK_NONBLOCK;
3613 #elif !defined(O_NONBLOCK)
3614         return -TARGET_EINVAL;
3615 #endif
3616     }
3617     *type = host_type;
3618     return 0;
3619 }
3620 
3621 /* Try to emulate socket type flags after socket creation.  */
3622 static int sock_flags_fixup(int fd, int target_type)
3623 {
3624 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3625     if (target_type & TARGET_SOCK_NONBLOCK) {
3626         int flags = fcntl(fd, F_GETFL);
3627         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3628             close(fd);
3629             return -TARGET_EINVAL;
3630         }
3631     }
3632 #endif
3633     return fd;
3634 }
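
     /* Together these helpers let a guest's socket type flags work even on hosts
      * that lack SOCK_CLOEXEC/SOCK_NONBLOCK: target_to_host_sock_type() translates
      * what it can at socket() time, and sock_flags_fixup() falls back to
      * fcntl(F_SETFL, O_NONBLOCK) afterwards.  For example, a guest call such as
      *
      *     socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);    // guest side
      *
      * becomes a plain host socket() followed by the fcntl() above on a host that
      * has O_NONBLOCK but no SOCK_NONBLOCK. */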
3635 
3636 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3637                                                abi_ulong target_addr,
3638                                                socklen_t len)
3639 {
3640     struct sockaddr *addr = host_addr;
3641     struct target_sockaddr *target_saddr;
3642 
3643     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3644     if (!target_saddr) {
3645         return -TARGET_EFAULT;
3646     }
3647 
3648     memcpy(addr, target_saddr, len);
3649     addr->sa_family = tswap16(target_saddr->sa_family);
3650     /* spkt_protocol is big-endian */
3651 
3652     unlock_user(target_saddr, target_addr, 0);
3653     return 0;
3654 }
3655 
3656 static TargetFdTrans target_packet_trans = {
3657     .target_to_host_addr = packet_target_to_host_sockaddr,
3658 };
3659 
3660 #ifdef CONFIG_RTNETLINK
3661 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3662 {
3663     abi_long ret;
3664 
3665     ret = target_to_host_nlmsg_route(buf, len);
3666     if (ret < 0) {
3667         return ret;
3668     }
3669 
3670     return len;
3671 }
3672 
3673 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3674 {
3675     abi_long ret;
3676 
3677     ret = host_to_target_nlmsg_route(buf, len);
3678     if (ret < 0) {
3679         return ret;
3680     }
3681 
3682     return len;
3683 }
3684 
3685 static TargetFdTrans target_netlink_route_trans = {
3686     .target_to_host_data = netlink_route_target_to_host,
3687     .host_to_target_data = netlink_route_host_to_target,
3688 };
3689 #endif /* CONFIG_RTNETLINK */
3690 
3691 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3692 {
3693     abi_long ret;
3694 
3695     ret = target_to_host_nlmsg_audit(buf, len);
3696     if (ret < 0) {
3697         return ret;
3698     }
3699 
3700     return len;
3701 }
3702 
3703 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3704 {
3705     abi_long ret;
3706 
3707     ret = host_to_target_nlmsg_audit(buf, len);
3708     if (ret < 0) {
3709         return ret;
3710     }
3711 
3712     return len;
3713 }
3714 
3715 static TargetFdTrans target_netlink_audit_trans = {
3716     .target_to_host_data = netlink_audit_target_to_host,
3717     .host_to_target_data = netlink_audit_host_to_target,
3718 };
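
     /* do_socket() below attaches one of these TargetFdTrans structures to the
      * new file descriptor via fd_trans_register(), so that the later send/recv
      * paths (fd_trans_target_to_host_data()/fd_trans_host_to_target_data()) can
      * convert netlink messages between target and host byte order, and so that
      * sockaddr translation for SOCK_PACKET sockets goes through
      * packet_target_to_host_sockaddr() above. */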
3719 
3720 /* do_socket() Must return target values and target errnos. */
3721 static abi_long do_socket(int domain, int type, int protocol)
3722 {
3723     int target_type = type;
3724     int ret;
3725 
3726     ret = target_to_host_sock_type(&type);
3727     if (ret) {
3728         return ret;
3729     }
3730 
3731     if (domain == PF_NETLINK && !(
3732 #ifdef CONFIG_RTNETLINK
3733          protocol == NETLINK_ROUTE ||
3734 #endif
3735          protocol == NETLINK_KOBJECT_UEVENT ||
3736          protocol == NETLINK_AUDIT)) {
3737         return -EPFNOSUPPORT;
3738     }
3739 
3740     if (domain == AF_PACKET ||
3741         (domain == AF_INET && type == SOCK_PACKET)) {
3742         protocol = tswap16(protocol);
3743     }
3744 
3745     ret = get_errno(socket(domain, type, protocol));
3746     if (ret >= 0) {
3747         ret = sock_flags_fixup(ret, target_type);
3748         if (type == SOCK_PACKET) {
3749             /* Handle an obsolete case: if the socket type is
3750              * SOCK_PACKET, bind by name.
3751              */
3752             fd_trans_register(ret, &target_packet_trans);
3753         } else if (domain == PF_NETLINK) {
3754             switch (protocol) {
3755 #ifdef CONFIG_RTNETLINK
3756             case NETLINK_ROUTE:
3757                 fd_trans_register(ret, &target_netlink_route_trans);
3758                 break;
3759 #endif
3760             case NETLINK_KOBJECT_UEVENT:
3761                 /* nothing to do: messages are strings */
3762                 break;
3763             case NETLINK_AUDIT:
3764                 fd_trans_register(ret, &target_netlink_audit_trans);
3765                 break;
3766             default:
3767                 g_assert_not_reached();
3768             }
3769         }
3770     }
3771     return ret;
3772 }
3773 
3774 /* do_bind() Must return target values and target errnos. */
3775 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3776                         socklen_t addrlen)
3777 {
3778     void *addr;
3779     abi_long ret;
3780 
3781     if ((int)addrlen < 0) {
3782         return -TARGET_EINVAL;
3783     }
3784 
3785     addr = alloca(addrlen+1);
3786 
3787     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3788     if (ret)
3789         return ret;
3790 
3791     return get_errno(bind(sockfd, addr, addrlen));
3792 }
3793 
3794 /* do_connect() Must return target values and target errnos. */
3795 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3796                            socklen_t addrlen)
3797 {
3798     void *addr;
3799     abi_long ret;
3800 
3801     if ((int)addrlen < 0) {
3802         return -TARGET_EINVAL;
3803     }
3804 
3805     addr = alloca(addrlen+1);
3806 
3807     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3808     if (ret)
3809         return ret;
3810 
3811     return get_errno(safe_connect(sockfd, addr, addrlen));
3812 }
3813 
3814 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3815 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3816                                       int flags, int send)
3817 {
3818     abi_long ret, len;
3819     struct msghdr msg;
3820     abi_ulong count;
3821     struct iovec *vec;
3822     abi_ulong target_vec;
3823 
3824     if (msgp->msg_name) {
3825         msg.msg_namelen = tswap32(msgp->msg_namelen);
3826         msg.msg_name = alloca(msg.msg_namelen+1);
3827         ret = target_to_host_sockaddr(fd, msg.msg_name,
3828                                       tswapal(msgp->msg_name),
3829                                       msg.msg_namelen);
3830         if (ret == -TARGET_EFAULT) {
3831             /* For connected sockets msg_name and msg_namelen must
3832              * be ignored, so returning EFAULT immediately is wrong.
3833              * Instead, pass a bad msg_name to the host kernel, and
3834              * let it decide whether to return EFAULT or not.
3835              */
3836             msg.msg_name = (void *)-1;
3837         } else if (ret) {
3838             goto out2;
3839         }
3840     } else {
3841         msg.msg_name = NULL;
3842         msg.msg_namelen = 0;
3843     }
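         /* Twice the target's msg_controllen is allocated here, presumably to
          * leave headroom for the host cmsg encoding being larger than the
          * target's (for example a 32-bit guest on a 64-bit host); the actual
          * conversion is done by target_to_host_cmsg()/host_to_target_cmsg()
          * below. */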
3844     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3845     msg.msg_control = alloca(msg.msg_controllen);
3846     msg.msg_flags = tswap32(msgp->msg_flags);
3847 
3848     count = tswapal(msgp->msg_iovlen);
3849     target_vec = tswapal(msgp->msg_iov);
3850 
3851     if (count > IOV_MAX) {
3852         /* sendmsg/recvmsg return a different errno for this condition than
3853          * readv/writev, so we must catch it here before lock_iovec() does.
3854          */
3855         ret = -TARGET_EMSGSIZE;
3856         goto out2;
3857     }
3858 
3859     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3860                      target_vec, count, send);
3861     if (vec == NULL) {
3862         ret = -host_to_target_errno(errno);
3863         goto out2;
3864     }
3865     msg.msg_iovlen = count;
3866     msg.msg_iov = vec;
3867 
3868     if (send) {
3869         if (fd_trans_target_to_host_data(fd)) {
3870             void *host_msg;
3871 
3872             host_msg = g_malloc(msg.msg_iov->iov_len);
3873             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3874             ret = fd_trans_target_to_host_data(fd)(host_msg,
3875                                                    msg.msg_iov->iov_len);
3876             if (ret >= 0) {
3877                 msg.msg_iov->iov_base = host_msg;
3878                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3879             }
3880             g_free(host_msg);
3881         } else {
3882             ret = target_to_host_cmsg(&msg, msgp);
3883             if (ret == 0) {
3884                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3885             }
3886         }
3887     } else {
3888         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3889         if (!is_error(ret)) {
3890             len = ret;
3891             if (fd_trans_host_to_target_data(fd)) {
3892                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3893                                                        len);
3894             } else {
3895                 ret = host_to_target_cmsg(msgp, &msg);
3896             }
3897             if (!is_error(ret)) {
3898                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3899                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3900                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3901                                     msg.msg_name, msg.msg_namelen);
3902                     if (ret) {
3903                         goto out;
3904                     }
3905                 }
3906 
3907                 ret = len;
3908             }
3909         }
3910     }
3911 
3912 out:
3913     unlock_iovec(vec, target_vec, count, !send);
3914 out2:
3915     return ret;
3916 }
3917 
3918 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3919                                int flags, int send)
3920 {
3921     abi_long ret;
3922     struct target_msghdr *msgp;
3923 
3924     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3925                           msgp,
3926                           target_msg,
3927                           send ? 1 : 0)) {
3928         return -TARGET_EFAULT;
3929     }
3930     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3931     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3932     return ret;
3933 }
3934 
3935 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3936  * so it might not have this *mmsg-specific flag either.
3937  */
3938 #ifndef MSG_WAITFORONE
3939 #define MSG_WAITFORONE 0x10000
3940 #endif
3941 
3942 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3943                                 unsigned int vlen, unsigned int flags,
3944                                 int send)
3945 {
3946     struct target_mmsghdr *mmsgp;
3947     abi_long ret = 0;
3948     int i;
3949 
3950     if (vlen > UIO_MAXIOV) {
3951         vlen = UIO_MAXIOV;
3952     }
3953 
3954     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3955     if (!mmsgp) {
3956         return -TARGET_EFAULT;
3957     }
3958 
3959     for (i = 0; i < vlen; i++) {
3960         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3961         if (is_error(ret)) {
3962             break;
3963         }
3964         mmsgp[i].msg_len = tswap32(ret);
3965         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3966         if (flags & MSG_WAITFORONE) {
3967             flags |= MSG_DONTWAIT;
3968         }
3969     }
3970 
3971     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3972 
3973     /* Return number of datagrams sent if we sent any at all;
3974      * otherwise return the error.
3975      */
3976     if (i) {
3977         return i;
3978     }
3979     return ret;
3980 }
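
     /* To illustrate the MSG_WAITFORONE handling above: a guest
      * recvmmsg(fd, vec, 8, MSG_WAITFORONE, NULL) may block for the first
      * datagram, but once one message has been stored the loop ORs in
      * MSG_DONTWAIT, so the remaining slots are filled only from what is already
      * queued and the call then returns the count instead of blocking again. */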
3981 
3982 /* do_accept4() Must return target values and target errnos. */
3983 static abi_long do_accept4(int fd, abi_ulong target_addr,
3984                            abi_ulong target_addrlen_addr, int flags)
3985 {
3986     socklen_t addrlen;
3987     void *addr;
3988     abi_long ret;
3989     int host_flags;
3990 
3991     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3992 
3993     if (target_addr == 0) {
3994         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3995     }
3996 
3997     /* linux returns EINVAL if addrlen pointer is invalid */
3998     if (get_user_u32(addrlen, target_addrlen_addr))
3999         return -TARGET_EINVAL;
4000 
4001     if ((int)addrlen < 0) {
4002         return -TARGET_EINVAL;
4003     }
4004 
4005     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4006         return -TARGET_EINVAL;
4007 
4008     addr = alloca(addrlen);
4009 
4010     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4011     if (!is_error(ret)) {
4012         host_to_target_sockaddr(target_addr, addr, addrlen);
4013         if (put_user_u32(addrlen, target_addrlen_addr))
4014             ret = -TARGET_EFAULT;
4015     }
4016     return ret;
4017 }
4018 
4019 /* do_getpeername() Must return target values and target errnos. */
4020 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4021                                abi_ulong target_addrlen_addr)
4022 {
4023     socklen_t addrlen;
4024     void *addr;
4025     abi_long ret;
4026 
4027     if (get_user_u32(addrlen, target_addrlen_addr))
4028         return -TARGET_EFAULT;
4029 
4030     if ((int)addrlen < 0) {
4031         return -TARGET_EINVAL;
4032     }
4033 
4034     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4035         return -TARGET_EFAULT;
4036 
4037     addr = alloca(addrlen);
4038 
4039     ret = get_errno(getpeername(fd, addr, &addrlen));
4040     if (!is_error(ret)) {
4041         host_to_target_sockaddr(target_addr, addr, addrlen);
4042         if (put_user_u32(addrlen, target_addrlen_addr))
4043             ret = -TARGET_EFAULT;
4044     }
4045     return ret;
4046 }
4047 
4048 /* do_getsockname() Must return target values and target errnos. */
4049 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4050                                abi_ulong target_addrlen_addr)
4051 {
4052     socklen_t addrlen;
4053     void *addr;
4054     abi_long ret;
4055 
4056     if (get_user_u32(addrlen, target_addrlen_addr))
4057         return -TARGET_EFAULT;
4058 
4059     if ((int)addrlen < 0) {
4060         return -TARGET_EINVAL;
4061     }
4062 
4063     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4064         return -TARGET_EFAULT;
4065 
4066     addr = alloca(addrlen);
4067 
4068     ret = get_errno(getsockname(fd, addr, &addrlen));
4069     if (!is_error(ret)) {
4070         host_to_target_sockaddr(target_addr, addr, addrlen);
4071         if (put_user_u32(addrlen, target_addrlen_addr))
4072             ret = -TARGET_EFAULT;
4073     }
4074     return ret;
4075 }
4076 
4077 /* do_socketpair() Must return target values and target errnos. */
4078 static abi_long do_socketpair(int domain, int type, int protocol,
4079                               abi_ulong target_tab_addr)
4080 {
4081     int tab[2];
4082     abi_long ret;
4083 
4084     target_to_host_sock_type(&type);
4085 
4086     ret = get_errno(socketpair(domain, type, protocol, tab));
4087     if (!is_error(ret)) {
4088         if (put_user_s32(tab[0], target_tab_addr)
4089             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4090             ret = -TARGET_EFAULT;
4091     }
4092     return ret;
4093 }
4094 
4095 /* do_sendto() Must return target values and target errnos. */
4096 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4097                           abi_ulong target_addr, socklen_t addrlen)
4098 {
4099     void *addr;
4100     void *host_msg;
4101     void *copy_msg = NULL;
4102     abi_long ret;
4103 
4104     if ((int)addrlen < 0) {
4105         return -TARGET_EINVAL;
4106     }
4107 
4108     host_msg = lock_user(VERIFY_READ, msg, len, 1);
4109     if (!host_msg)
4110         return -TARGET_EFAULT;
4111     if (fd_trans_target_to_host_data(fd)) {
4112         copy_msg = host_msg;
4113         host_msg = g_malloc(len);
4114         memcpy(host_msg, copy_msg, len);
4115         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4116         if (ret < 0) {
4117             goto fail;
4118         }
4119     }
4120     if (target_addr) {
4121         addr = alloca(addrlen+1);
4122         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4123         if (ret) {
4124             goto fail;
4125         }
4126         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4127     } else {
4128         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4129     }
4130 fail:
4131     if (copy_msg) {
4132         g_free(host_msg);
4133         host_msg = copy_msg;
4134     }
4135     unlock_user(host_msg, msg, 0);
4136     return ret;
4137 }
4138 
4139 /* do_recvfrom() Must return target values and target errnos. */
4140 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4141                             abi_ulong target_addr,
4142                             abi_ulong target_addrlen)
4143 {
4144     socklen_t addrlen;
4145     void *addr;
4146     void *host_msg;
4147     abi_long ret;
4148 
4149     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4150     if (!host_msg)
4151         return -TARGET_EFAULT;
4152     if (target_addr) {
4153         if (get_user_u32(addrlen, target_addrlen)) {
4154             ret = -TARGET_EFAULT;
4155             goto fail;
4156         }
4157         if ((int)addrlen < 0) {
4158             ret = -TARGET_EINVAL;
4159             goto fail;
4160         }
4161         addr = alloca(addrlen);
4162         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4163                                       addr, &addrlen));
4164     } else {
4165         addr = NULL; /* To keep compiler quiet.  */
4166         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4167     }
4168     if (!is_error(ret)) {
4169         if (fd_trans_host_to_target_data(fd)) {
4170             ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4171         }
4172         if (target_addr) {
4173             host_to_target_sockaddr(target_addr, addr, addrlen);
4174             if (put_user_u32(addrlen, target_addrlen)) {
4175                 ret = -TARGET_EFAULT;
4176                 goto fail;
4177             }
4178         }
4179         unlock_user(host_msg, msg, len);
4180     } else {
4181 fail:
4182         unlock_user(host_msg, msg, 0);
4183     }
4184     return ret;
4185 }
4186 
4187 #ifdef TARGET_NR_socketcall
4188 /* do_socketcall() must return target values and target errnos. */
4189 static abi_long do_socketcall(int num, abi_ulong vptr)
4190 {
4191     static const unsigned nargs[] = { /* number of arguments per operation */
4192         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
4193         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
4194         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
4195         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
4196         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
4197         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4198         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4199         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
4200         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
4201         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
4202         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
4203         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
4204         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
4205         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4206         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4207         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
4208         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
4209         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
4210         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
4211         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
4212     };
4213     abi_long a[6]; /* max 6 args */
4214     unsigned i;
4215 
4216     /* check the range of the first argument num */
4217     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4218     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4219         return -TARGET_EINVAL;
4220     }
4221     /* ensure we have space for args */
4222     if (nargs[num] > ARRAY_SIZE(a)) {
4223         return -TARGET_EINVAL;
4224     }
4225     /* collect the arguments in a[] according to nargs[] */
4226     for (i = 0; i < nargs[num]; ++i) {
4227         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4228             return -TARGET_EFAULT;
4229         }
4230     }
4231     /* now that we have the args, invoke the appropriate underlying function */
4232     switch (num) {
4233     case TARGET_SYS_SOCKET: /* domain, type, protocol */
4234         return do_socket(a[0], a[1], a[2]);
4235     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4236         return do_bind(a[0], a[1], a[2]);
4237     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4238         return do_connect(a[0], a[1], a[2]);
4239     case TARGET_SYS_LISTEN: /* sockfd, backlog */
4240         return get_errno(listen(a[0], a[1]));
4241     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4242         return do_accept4(a[0], a[1], a[2], 0);
4243     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4244         return do_getsockname(a[0], a[1], a[2]);
4245     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4246         return do_getpeername(a[0], a[1], a[2]);
4247     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4248         return do_socketpair(a[0], a[1], a[2], a[3]);
4249     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4250         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4251     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4252         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4253     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4254         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4255     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4256         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4257     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4258         return get_errno(shutdown(a[0], a[1]));
4259     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4260         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4261     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4262         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4263     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4264         return do_sendrecvmsg(a[0], a[1], a[2], 1);
4265     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4266         return do_sendrecvmsg(a[0], a[1], a[2], 0);
4267     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4268         return do_accept4(a[0], a[1], a[2], a[3]);
4269     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4270         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4271     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4272         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4273     default:
4274         gemu_log("Unsupported socketcall: %d\n", num);
4275         return -TARGET_EINVAL;
4276     }
4277 }
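
     /* For example, a guest connect() on a socketcall architecture arrives here
      * as do_socketcall(TARGET_SYS_CONNECT, vptr), where vptr points at three
      * abi_longs {sockfd, addr, addrlen} in guest memory; nargs[] says how many
      * words to fetch and the switch above forwards them to do_connect(). */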
4278 #endif
4279 
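     /* Bookkeeping used by the shmat()/shmdt() emulation later in this file: the
      * guest address and size of each attached SysV shared-memory segment is
      * recorded here so that a later shmdt() knows how much guest address space
      * to release. */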
4280 #define N_SHM_REGIONS	32
4281 
4282 static struct shm_region {
4283     abi_ulong start;
4284     abi_ulong size;
4285     bool in_use;
4286 } shm_regions[N_SHM_REGIONS];
4287 
4288 #ifndef TARGET_SEMID64_DS
4289 /* asm-generic version of this struct */
4290 struct target_semid64_ds
4291 {
4292   struct target_ipc_perm sem_perm;
4293   abi_ulong sem_otime;
4294 #if TARGET_ABI_BITS == 32
4295   abi_ulong __unused1;
4296 #endif
4297   abi_ulong sem_ctime;
4298 #if TARGET_ABI_BITS == 32
4299   abi_ulong __unused2;
4300 #endif
4301   abi_ulong sem_nsems;
4302   abi_ulong __unused3;
4303   abi_ulong __unused4;
4304 };
4305 #endif
4306 
4307 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4308                                                abi_ulong target_addr)
4309 {
4310     struct target_ipc_perm *target_ip;
4311     struct target_semid64_ds *target_sd;
4312 
4313     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4314         return -TARGET_EFAULT;
4315     target_ip = &(target_sd->sem_perm);
4316     host_ip->__key = tswap32(target_ip->__key);
4317     host_ip->uid = tswap32(target_ip->uid);
4318     host_ip->gid = tswap32(target_ip->gid);
4319     host_ip->cuid = tswap32(target_ip->cuid);
4320     host_ip->cgid = tswap32(target_ip->cgid);
4321 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4322     host_ip->mode = tswap32(target_ip->mode);
4323 #else
4324     host_ip->mode = tswap16(target_ip->mode);
4325 #endif
4326 #if defined(TARGET_PPC)
4327     host_ip->__seq = tswap32(target_ip->__seq);
4328 #else
4329     host_ip->__seq = tswap16(target_ip->__seq);
4330 #endif
4331     unlock_user_struct(target_sd, target_addr, 0);
4332     return 0;
4333 }
4334 
4335 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4336                                                struct ipc_perm *host_ip)
4337 {
4338     struct target_ipc_perm *target_ip;
4339     struct target_semid64_ds *target_sd;
4340 
4341     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4342         return -TARGET_EFAULT;
4343     target_ip = &(target_sd->sem_perm);
4344     target_ip->__key = tswap32(host_ip->__key);
4345     target_ip->uid = tswap32(host_ip->uid);
4346     target_ip->gid = tswap32(host_ip->gid);
4347     target_ip->cuid = tswap32(host_ip->cuid);
4348     target_ip->cgid = tswap32(host_ip->cgid);
4349 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4350     target_ip->mode = tswap32(host_ip->mode);
4351 #else
4352     target_ip->mode = tswap16(host_ip->mode);
4353 #endif
4354 #if defined(TARGET_PPC)
4355     target_ip->__seq = tswap32(host_ip->__seq);
4356 #else
4357     target_ip->__seq = tswap16(host_ip->__seq);
4358 #endif
4359     unlock_user_struct(target_sd, target_addr, 1);
4360     return 0;
4361 }
4362 
4363 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4364                                                abi_ulong target_addr)
4365 {
4366     struct target_semid64_ds *target_sd;
4367 
4368     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4369         return -TARGET_EFAULT;
4370     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4371         return -TARGET_EFAULT;
4372     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4373     host_sd->sem_otime = tswapal(target_sd->sem_otime);
4374     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4375     unlock_user_struct(target_sd, target_addr, 0);
4376     return 0;
4377 }
4378 
4379 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4380                                                struct semid_ds *host_sd)
4381 {
4382     struct target_semid64_ds *target_sd;
4383 
4384     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4385         return -TARGET_EFAULT;
4386     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4387         return -TARGET_EFAULT;
4388     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4389     target_sd->sem_otime = tswapal(host_sd->sem_otime);
4390     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4391     unlock_user_struct(target_sd, target_addr, 1);
4392     return 0;
4393 }
4394 
4395 struct target_seminfo {
4396     int semmap;
4397     int semmni;
4398     int semmns;
4399     int semmnu;
4400     int semmsl;
4401     int semopm;
4402     int semume;
4403     int semusz;
4404     int semvmx;
4405     int semaem;
4406 };
4407 
4408 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4409                                               struct seminfo *host_seminfo)
4410 {
4411     struct target_seminfo *target_seminfo;
4412     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4413         return -TARGET_EFAULT;
4414     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4415     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4416     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4417     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4418     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4419     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4420     __put_user(host_seminfo->semume, &target_seminfo->semume);
4421     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4422     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4423     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4424     unlock_user_struct(target_seminfo, target_addr, 1);
4425     return 0;
4426 }
4427 
4428 union semun {
4429     int val;
4430     struct semid_ds *buf;
4431     unsigned short *array;
4432     struct seminfo *__buf;
4433 };
4434 
4435 union target_semun {
4436     int val;
4437     abi_ulong buf;
4438     abi_ulong array;
4439     abi_ulong __buf;
4440 };
4441 
4442 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4443                                                abi_ulong target_addr)
4444 {
4445     int nsems;
4446     unsigned short *array;
4447     union semun semun;
4448     struct semid_ds semid_ds;
4449     int i, ret;
4450 
4451     semun.buf = &semid_ds;
4452 
4453     ret = semctl(semid, 0, IPC_STAT, semun);
4454     if (ret == -1)
4455         return get_errno(ret);
4456 
4457     nsems = semid_ds.sem_nsems;
4458 
4459     *host_array = g_try_new(unsigned short, nsems);
4460     if (!*host_array) {
4461         return -TARGET_ENOMEM;
4462     }
4463     array = lock_user(VERIFY_READ, target_addr,
4464                       nsems*sizeof(unsigned short), 1);
4465     if (!array) {
4466         g_free(*host_array);
4467         return -TARGET_EFAULT;
4468     }
4469 
4470     for(i=0; i<nsems; i++) {
4471         __get_user((*host_array)[i], &array[i]);
4472     }
4473     unlock_user(array, target_addr, 0);
4474 
4475     return 0;
4476 }
4477 
4478 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4479                                                unsigned short **host_array)
4480 {
4481     int nsems;
4482     unsigned short *array;
4483     union semun semun;
4484     struct semid_ds semid_ds;
4485     int i, ret;
4486 
4487     semun.buf = &semid_ds;
4488 
4489     ret = semctl(semid, 0, IPC_STAT, semun);
4490     if (ret == -1)
4491         return get_errno(ret);
4492 
4493     nsems = semid_ds.sem_nsems;
4494 
4495     array = lock_user(VERIFY_WRITE, target_addr,
4496                       nsems*sizeof(unsigned short), 0);
4497     if (!array)
4498         return -TARGET_EFAULT;
4499 
4500     for(i=0; i<nsems; i++) {
4501         __put_user((*host_array)[i], &array[i]);
4502     }
4503     g_free(*host_array);
4504     unlock_user(array, target_addr, 1);
4505 
4506     return 0;
4507 }
4508 
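     /* Which member of the target's semun union is live depends on the command,
      * as with semctl(2): GETVAL/SETVAL use .val, GETALL/SETALL use .array,
      * IPC_STAT/IPC_SET/SEM_STAT use .buf and IPC_INFO/SEM_INFO use .__buf,
      * while IPC_RMID/GETPID/GETNCNT/GETZCNT ignore it; the switch below decodes
      * target_arg accordingly before calling the host semctl(). */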
4509 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4510                                  abi_ulong target_arg)
4511 {
4512     union target_semun target_su = { .buf = target_arg };
4513     union semun arg;
4514     struct semid_ds dsarg;
4515     unsigned short *array = NULL;
4516     struct seminfo seminfo;
4517     abi_long ret = -TARGET_EINVAL;
4518     abi_long err;
4519     cmd &= 0xff;
4520 
4521     switch( cmd ) {
4522 	case GETVAL:
4523 	case SETVAL:
4524             /* In 64-bit cross-endian situations, we will erroneously pick up
4525              * the wrong half of the union for the "val" element.  To rectify
4526              * this, the entire 8-byte structure is byteswapped, followed by
4527              * a swap of the 4-byte val field. In other cases, the data is
4528              * already in proper host byte order. */
4529 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4530 		target_su.buf = tswapal(target_su.buf);
4531 		arg.val = tswap32(target_su.val);
4532 	    } else {
4533 		arg.val = target_su.val;
4534 	    }
4535             ret = get_errno(semctl(semid, semnum, cmd, arg));
4536             break;
4537 	case GETALL:
4538 	case SETALL:
4539             err = target_to_host_semarray(semid, &array, target_su.array);
4540             if (err)
4541                 return err;
4542             arg.array = array;
4543             ret = get_errno(semctl(semid, semnum, cmd, arg));
4544             err = host_to_target_semarray(semid, target_su.array, &array);
4545             if (err)
4546                 return err;
4547             break;
4548 	case IPC_STAT:
4549 	case IPC_SET:
4550 	case SEM_STAT:
4551             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4552             if (err)
4553                 return err;
4554             arg.buf = &dsarg;
4555             ret = get_errno(semctl(semid, semnum, cmd, arg));
4556             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4557             if (err)
4558                 return err;
4559             break;
4560 	case IPC_INFO:
4561 	case SEM_INFO:
4562             arg.__buf = &seminfo;
4563             ret = get_errno(semctl(semid, semnum, cmd, arg));
4564             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4565             if (err)
4566                 return err;
4567             break;
4568 	case IPC_RMID:
4569 	case GETPID:
4570 	case GETNCNT:
4571 	case GETZCNT:
4572             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4573             break;
4574     }
4575 
4576     return ret;
4577 }
4578 
4579 struct target_sembuf {
4580     unsigned short sem_num;
4581     short sem_op;
4582     short sem_flg;
4583 };
4584 
4585 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4586                                              abi_ulong target_addr,
4587                                              unsigned nsops)
4588 {
4589     struct target_sembuf *target_sembuf;
4590     int i;
4591 
4592     target_sembuf = lock_user(VERIFY_READ, target_addr,
4593                               nsops*sizeof(struct target_sembuf), 1);
4594     if (!target_sembuf)
4595         return -TARGET_EFAULT;
4596 
4597     for(i=0; i<nsops; i++) {
4598         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4599         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4600         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4601     }
4602 
4603     unlock_user(target_sembuf, target_addr, 0);
4604 
4605     return 0;
4606 }
4607 
4608 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4609 {
4610     struct sembuf sops[nsops];
4611 
4612     if (target_to_host_sembuf(sops, ptr, nsops))
4613         return -TARGET_EFAULT;
4614 
4615     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4616 }
4617 
4618 struct target_msqid_ds
4619 {
4620     struct target_ipc_perm msg_perm;
4621     abi_ulong msg_stime;
4622 #if TARGET_ABI_BITS == 32
4623     abi_ulong __unused1;
4624 #endif
4625     abi_ulong msg_rtime;
4626 #if TARGET_ABI_BITS == 32
4627     abi_ulong __unused2;
4628 #endif
4629     abi_ulong msg_ctime;
4630 #if TARGET_ABI_BITS == 32
4631     abi_ulong __unused3;
4632 #endif
4633     abi_ulong __msg_cbytes;
4634     abi_ulong msg_qnum;
4635     abi_ulong msg_qbytes;
4636     abi_ulong msg_lspid;
4637     abi_ulong msg_lrpid;
4638     abi_ulong __unused4;
4639     abi_ulong __unused5;
4640 };
4641 
4642 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4643                                                abi_ulong target_addr)
4644 {
4645     struct target_msqid_ds *target_md;
4646 
4647     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4648         return -TARGET_EFAULT;
4649     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4650         return -TARGET_EFAULT;
4651     host_md->msg_stime = tswapal(target_md->msg_stime);
4652     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4653     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4654     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4655     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4656     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4657     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4658     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4659     unlock_user_struct(target_md, target_addr, 0);
4660     return 0;
4661 }
4662 
4663 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4664                                                struct msqid_ds *host_md)
4665 {
4666     struct target_msqid_ds *target_md;
4667 
4668     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4669         return -TARGET_EFAULT;
4670     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4671         return -TARGET_EFAULT;
4672     target_md->msg_stime = tswapal(host_md->msg_stime);
4673     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4674     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4675     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4676     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4677     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4678     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4679     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4680     unlock_user_struct(target_md, target_addr, 1);
4681     return 0;
4682 }
4683 
4684 struct target_msginfo {
4685     int msgpool;
4686     int msgmap;
4687     int msgmax;
4688     int msgmnb;
4689     int msgmni;
4690     int msgssz;
4691     int msgtql;
4692     unsigned short int msgseg;
4693 };
4694 
4695 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4696                                               struct msginfo *host_msginfo)
4697 {
4698     struct target_msginfo *target_msginfo;
4699     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4700         return -TARGET_EFAULT;
4701     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4702     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4703     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4704     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4705     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4706     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4707     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4708     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4709     unlock_user_struct(target_msginfo, target_addr, 1);
4710     return 0;
4711 }
4712 
4713 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4714 {
4715     struct msqid_ds dsarg;
4716     struct msginfo msginfo;
4717     abi_long ret = -TARGET_EINVAL;
4718 
4719     cmd &= 0xff;
4720 
4721     switch (cmd) {
4722     case IPC_STAT:
4723     case IPC_SET:
4724     case MSG_STAT:
4725         if (target_to_host_msqid_ds(&dsarg,ptr))
4726             return -TARGET_EFAULT;
4727         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4728         if (host_to_target_msqid_ds(ptr,&dsarg))
4729             return -TARGET_EFAULT;
4730         break;
4731     case IPC_RMID:
4732         ret = get_errno(msgctl(msgid, cmd, NULL));
4733         break;
4734     case IPC_INFO:
4735     case MSG_INFO:
4736         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4737         if (host_to_target_msginfo(ptr, &msginfo))
4738             return -TARGET_EFAULT;
4739         break;
4740     }
4741 
4742     return ret;
4743 }
4744 
4745 struct target_msgbuf {
4746     abi_long mtype;
4747     char	mtext[1];
4748 };
4749 
4750 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4751                                  ssize_t msgsz, int msgflg)
4752 {
4753     struct target_msgbuf *target_mb;
4754     struct msgbuf *host_mb;
4755     abi_long ret = 0;
4756 
4757     if (msgsz < 0) {
4758         return -TARGET_EINVAL;
4759     }
4760 
4761     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4762         return -TARGET_EFAULT;
4763     host_mb = g_try_malloc(msgsz + sizeof(long));
4764     if (!host_mb) {
4765         unlock_user_struct(target_mb, msgp, 0);
4766         return -TARGET_ENOMEM;
4767     }
4768     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4769     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4770     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4771     g_free(host_mb);
4772     unlock_user_struct(target_mb, msgp, 0);
4773 
4774     return ret;
4775 }
4776 
4777 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4778                                  ssize_t msgsz, abi_long msgtyp,
4779                                  int msgflg)
4780 {
4781     struct target_msgbuf *target_mb;
4782     char *target_mtext;
4783     struct msgbuf *host_mb;
4784     abi_long ret = 0;
4785 
4786     if (msgsz < 0) {
4787         return -TARGET_EINVAL;
4788     }
4789 
4790     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4791         return -TARGET_EFAULT;
4792 
4793     host_mb = g_try_malloc(msgsz + sizeof(long));
4794     if (!host_mb) {
4795         ret = -TARGET_ENOMEM;
4796         goto end;
4797     }
4798     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4799 
4800     if (ret > 0) {
4801         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4802         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4803         if (!target_mtext) {
4804             ret = -TARGET_EFAULT;
4805             goto end;
4806         }
4807         memcpy(target_mb->mtext, host_mb->mtext, ret);
4808         unlock_user(target_mtext, target_mtext_addr, ret);
4809     }
4810 
4811     target_mb->mtype = tswapal(host_mb->mtype);
4812 
4813 end:
4814     if (target_mb)
4815         unlock_user_struct(target_mb, msgp, 1);
4816     g_free(host_mb);
4817     return ret;
4818 }
4819 
4820 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4821                                                abi_ulong target_addr)
4822 {
4823     struct target_shmid_ds *target_sd;
4824 
4825     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4826         return -TARGET_EFAULT;
4827     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4828         return -TARGET_EFAULT;
4829     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4830     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4831     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4832     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4833     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4834     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4835     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4836     unlock_user_struct(target_sd, target_addr, 0);
4837     return 0;
4838 }
4839 
4840 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4841                                                struct shmid_ds *host_sd)
4842 {
4843     struct target_shmid_ds *target_sd;
4844 
4845     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4846         return -TARGET_EFAULT;
4847     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4848         return -TARGET_EFAULT;
4849     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4850     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4851     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4852     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4853     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4854     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4855     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4856     unlock_user_struct(target_sd, target_addr, 1);
4857     return 0;
4858 }
4859 
4860 struct  target_shminfo {
4861     abi_ulong shmmax;
4862     abi_ulong shmmin;
4863     abi_ulong shmmni;
4864     abi_ulong shmseg;
4865     abi_ulong shmall;
4866 };
4867 
4868 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4869                                               struct shminfo *host_shminfo)
4870 {
4871     struct target_shminfo *target_shminfo;
4872     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4873         return -TARGET_EFAULT;
4874     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4875     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4876     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4877     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4878     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4879     unlock_user_struct(target_shminfo, target_addr, 1);
4880     return 0;
4881 }
4882 
4883 struct target_shm_info {
4884     int used_ids;
4885     abi_ulong shm_tot;
4886     abi_ulong shm_rss;
4887     abi_ulong shm_swp;
4888     abi_ulong swap_attempts;
4889     abi_ulong swap_successes;
4890 };
4891 
4892 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4893                                                struct shm_info *host_shm_info)
4894 {
4895     struct target_shm_info *target_shm_info;
4896     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4897         return -TARGET_EFAULT;
4898     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4899     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4900     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4901     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4902     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4903     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4904     unlock_user_struct(target_shm_info, target_addr, 1);
4905     return 0;
4906 }
4907 
4908 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4909 {
4910     struct shmid_ds dsarg;
4911     struct shminfo shminfo;
4912     struct shm_info shm_info;
4913     abi_long ret = -TARGET_EINVAL;
4914 
4915     cmd &= 0xff;
4916 
4917     switch(cmd) {
4918     case IPC_STAT:
4919     case IPC_SET:
4920     case SHM_STAT:
4921         if (target_to_host_shmid_ds(&dsarg, buf))
4922             return -TARGET_EFAULT;
4923         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4924         if (host_to_target_shmid_ds(buf, &dsarg))
4925             return -TARGET_EFAULT;
4926         break;
4927     case IPC_INFO:
4928         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4929         if (host_to_target_shminfo(buf, &shminfo))
4930             return -TARGET_EFAULT;
4931         break;
4932     case SHM_INFO:
4933         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4934         if (host_to_target_shm_info(buf, &shm_info))
4935             return -TARGET_EFAULT;
4936         break;
4937     case IPC_RMID:
4938     case SHM_LOCK:
4939     case SHM_UNLOCK:
4940         ret = get_errno(shmctl(shmid, cmd, NULL));
4941         break;
4942     }
4943 
4944     return ret;
4945 }
4946 
4947 #ifndef TARGET_FORCE_SHMLBA
4948 /* For most architectures, SHMLBA is the same as the page size;
4949  * some architectures have larger values, in which case they should
4950  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4951  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4952  * and defining its own value for SHMLBA.
4953  *
4954  * The kernel also permits SHMLBA to be set by the architecture to a
4955  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4956  * this means that addresses are rounded to the large size if
4957  * SHM_RND is set but addresses not aligned to that size are not rejected
4958  * as long as they are at least page-aligned. Since the only architecture
4959  * which uses this is ia64, this code doesn't provide for that oddity.
4960  */
4961 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4962 {
4963     return TARGET_PAGE_SIZE;
4964 }
4965 #endif
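/* Illustrative sketch only (not built here): a target that needs a larger
 * alignment would, in its own headers, define TARGET_FORCE_SHMLBA and
 * supply something along these lines; the alignment value below is made up
 * purely for the example.
 *
 *   #define TARGET_FORCE_SHMLBA 1
 *
 *   static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *   {
 *       return 4 * TARGET_PAGE_SIZE;
 *   }
 */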
4966 
4967 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4968                                  int shmid, abi_ulong shmaddr, int shmflg)
4969 {
4970     abi_long raddr;
4971     void *host_raddr;
4972     struct shmid_ds shm_info;
4973     int i,ret;
4974     abi_ulong shmlba;
4975 
4976     /* find out the length of the shared memory segment */
4977     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4978     if (is_error(ret)) {
4979         /* can't get length, bail out */
4980         return ret;
4981     }
4982 
4983     shmlba = target_shmlba(cpu_env);
4984 
4985     if (shmaddr & (shmlba - 1)) {
4986         if (shmflg & SHM_RND) {
4987             shmaddr &= ~(shmlba - 1);
4988         } else {
4989             return -TARGET_EINVAL;
4990         }
4991     }
4992     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4993         return -TARGET_EINVAL;
4994     }
4995 
4996     mmap_lock();
4997 
4998     if (shmaddr)
4999         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
5000     else {
5001         abi_ulong mmap_start;
5002 
5003         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
5004 
5005         if (mmap_start == -1) {
5006             errno = ENOMEM;
5007             host_raddr = (void *)-1;
5008         } else
5009             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
5010     }
5011 
5012     if (host_raddr == (void *)-1) {
5013         mmap_unlock();
5014         return get_errno((long)host_raddr);
5015     }
5016     raddr=h2g((unsigned long)host_raddr);
5017 
5018     page_set_flags(raddr, raddr + shm_info.shm_segsz,
5019                    PAGE_VALID | PAGE_READ |
5020                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
5021 
5022     for (i = 0; i < N_SHM_REGIONS; i++) {
5023         if (!shm_regions[i].in_use) {
5024             shm_regions[i].in_use = true;
5025             shm_regions[i].start = raddr;
5026             shm_regions[i].size = shm_info.shm_segsz;
5027             break;
5028         }
5029     }
5030 
5031     mmap_unlock();
5032     return raddr;
5033 
5034 }
5035 
5036 static inline abi_long do_shmdt(abi_ulong shmaddr)
5037 {
5038     int i;
5039     abi_long rv;
5040 
5041     mmap_lock();
5042 
5043     for (i = 0; i < N_SHM_REGIONS; ++i) {
5044         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
5045             shm_regions[i].in_use = false;
5046             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
5047             break;
5048         }
5049     }
5050     rv = get_errno(shmdt(g2h(shmaddr)));
5051 
5052     mmap_unlock();
5053 
5054     return rv;
5055 }
5056 
5057 #ifdef TARGET_NR_ipc
5058 /* ??? This only works with linear mappings.  */
5059 /* do_ipc() must return target values and target errnos. */
5060 static abi_long do_ipc(CPUArchState *cpu_env,
5061                        unsigned int call, abi_long first,
5062                        abi_long second, abi_long third,
5063                        abi_long ptr, abi_long fifth)
5064 {
5065     int version;
5066     abi_long ret = 0;
5067 
5068     version = call >> 16;
5069     call &= 0xffff;
5070 
5071     switch (call) {
5072     case IPCOP_semop:
5073         ret = do_semop(first, ptr, second);
5074         break;
5075 
5076     case IPCOP_semget:
5077         ret = get_errno(semget(first, second, third));
5078         break;
5079 
5080     case IPCOP_semctl: {
5081         /* The semun argument to semctl is passed by value, so dereference the
5082          * ptr argument. */
5083         abi_ulong atptr;
5084         get_user_ual(atptr, ptr);
5085         ret = do_semctl(first, second, third, atptr);
5086         break;
5087     }
5088 
5089     case IPCOP_msgget:
5090         ret = get_errno(msgget(first, second));
5091         break;
5092 
5093     case IPCOP_msgsnd:
5094         ret = do_msgsnd(first, ptr, second, third);
5095         break;
5096 
5097     case IPCOP_msgctl:
5098         ret = do_msgctl(first, second, ptr);
5099         break;
5100 
5101     case IPCOP_msgrcv:
5102         switch (version) {
5103         case 0:
5104             {
5105                 struct target_ipc_kludge {
5106                     abi_long msgp;
5107                     abi_long msgtyp;
5108                 } *tmp;
5109 
5110                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5111                     ret = -TARGET_EFAULT;
5112                     break;
5113                 }
5114 
5115                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5116 
5117                 unlock_user_struct(tmp, ptr, 0);
5118                 break;
5119             }
5120         default:
5121             ret = do_msgrcv(first, ptr, second, fifth, third);
5122         }
5123         break;
5124 
5125     case IPCOP_shmat:
5126         switch (version) {
5127         default:
5128         {
5129             abi_ulong raddr;
5130             raddr = do_shmat(cpu_env, first, ptr, second);
5131             if (is_error(raddr))
5132                 return get_errno(raddr);
5133             if (put_user_ual(raddr, third))
5134                 return -TARGET_EFAULT;
5135             break;
5136         }
5137         case 1:
5138             ret = -TARGET_EINVAL;
5139             break;
5140         }
5141         break;
5142     case IPCOP_shmdt:
5143         ret = do_shmdt(ptr);
5144         break;
5145 
5146     case IPCOP_shmget:
5147         /* IPC_* flag values are the same on all Linux platforms */
5148         ret = get_errno(shmget(first, second, third));
5149         break;
5150 
5151     /* IPC_* and SHM_* command values are the same on all Linux platforms */
5152     case IPCOP_shmctl:
5153         ret = do_shmctl(first, second, ptr);
5154         break;
5155     default:
5156         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5157         ret = -TARGET_ENOSYS;
5158         break;
5159     }
5160     return ret;
5161 }
5162 #endif
5163 
5164 /* kernel structure types definitions */
5165 
5166 #define STRUCT(name, ...) STRUCT_ ## name,
5167 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5168 enum {
5169 #include "syscall_types.h"
5170 STRUCT_MAX
5171 };
5172 #undef STRUCT
5173 #undef STRUCT_SPECIAL
5174 
5175 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
5176 #define STRUCT_SPECIAL(name)
5177 #include "syscall_types.h"
5178 #undef STRUCT
5179 #undef STRUCT_SPECIAL
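/* Illustrative expansion, assuming syscall_types.h contains an entry of the
 * form STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT): the
 * first inclusion above contributes STRUCT_winsize to the enum, and the
 * second produces
 *
 *   static const argtype struct_winsize_def[] = {
 *       TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL
 *   };
 *
 * which the thunk code later uses to convert such a struct field by field.
 */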
5180 
5181 typedef struct IOCTLEntry IOCTLEntry;
5182 
5183 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5184                              int fd, int cmd, abi_long arg);
5185 
5186 struct IOCTLEntry {
5187     int target_cmd;
5188     unsigned int host_cmd;
5189     const char *name;
5190     int access;
5191     do_ioctl_fn *do_ioctl;
5192     const argtype arg_type[5];
5193 };
5194 
5195 #define IOC_R 0x0001
5196 #define IOC_W 0x0002
5197 #define IOC_RW (IOC_R | IOC_W)
5198 
5199 #define MAX_STRUCT_SIZE 4096
5200 
5201 #ifdef CONFIG_FIEMAP
5202 /* So fiemap access checks don't overflow on 32 bit systems.
5203  * This is very slightly smaller than the limit imposed by
5204  * the underlying kernel.
5205  */
5206 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
5207                             / sizeof(struct fiemap_extent))
5208 
5209 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5210                                        int fd, int cmd, abi_long arg)
5211 {
5212     /* The parameter for this ioctl is a struct fiemap followed
5213      * by an array of struct fiemap_extent whose size is set
5214      * in fiemap->fm_extent_count. The array is filled in by the
5215      * ioctl.
5216      */
5217     int target_size_in, target_size_out;
5218     struct fiemap *fm;
5219     const argtype *arg_type = ie->arg_type;
5220     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5221     void *argptr, *p;
5222     abi_long ret;
5223     int i, extent_size = thunk_type_size(extent_arg_type, 0);
5224     uint32_t outbufsz;
5225     int free_fm = 0;
5226 
5227     assert(arg_type[0] == TYPE_PTR);
5228     assert(ie->access == IOC_RW);
5229     arg_type++;
5230     target_size_in = thunk_type_size(arg_type, 0);
5231     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5232     if (!argptr) {
5233         return -TARGET_EFAULT;
5234     }
5235     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5236     unlock_user(argptr, arg, 0);
5237     fm = (struct fiemap *)buf_temp;
5238     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5239         return -TARGET_EINVAL;
5240     }
5241 
5242     outbufsz = sizeof (*fm) +
5243         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5244 
5245     if (outbufsz > MAX_STRUCT_SIZE) {
5246         /* We can't fit all the extents into the fixed size buffer.
5247          * Allocate one that is large enough and use it instead.
5248          */
5249         fm = g_try_malloc(outbufsz);
5250         if (!fm) {
5251             return -TARGET_ENOMEM;
5252         }
5253         memcpy(fm, buf_temp, sizeof(struct fiemap));
5254         free_fm = 1;
5255     }
5256     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5257     if (!is_error(ret)) {
5258         target_size_out = target_size_in;
5259         /* An extent_count of 0 means we were only counting the extents
5260          * so there are no structs to copy
5261          */
5262         if (fm->fm_extent_count != 0) {
5263             target_size_out += fm->fm_mapped_extents * extent_size;
5264         }
5265         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5266         if (!argptr) {
5267             ret = -TARGET_EFAULT;
5268         } else {
5269             /* Convert the struct fiemap */
5270             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5271             if (fm->fm_extent_count != 0) {
5272                 p = argptr + target_size_in;
5273                 /* ...and then all the struct fiemap_extents */
5274                 for (i = 0; i < fm->fm_mapped_extents; i++) {
5275                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5276                                   THUNK_TARGET);
5277                     p += extent_size;
5278                 }
5279             }
5280             unlock_user(argptr, arg, target_size_out);
5281         }
5282     }
5283     if (free_fm) {
5284         g_free(fm);
5285     }
5286     return ret;
5287 }
5288 #endif
5289 
5290 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5291                                 int fd, int cmd, abi_long arg)
5292 {
5293     const argtype *arg_type = ie->arg_type;
5294     int target_size;
5295     void *argptr;
5296     int ret;
5297     struct ifconf *host_ifconf;
5298     uint32_t outbufsz;
5299     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5300     int target_ifreq_size;
5301     int nb_ifreq;
5302     int free_buf = 0;
5303     int i;
5304     int target_ifc_len;
5305     abi_long target_ifc_buf;
5306     int host_ifc_len;
5307     char *host_ifc_buf;
5308 
5309     assert(arg_type[0] == TYPE_PTR);
5310     assert(ie->access == IOC_RW);
5311 
5312     arg_type++;
5313     target_size = thunk_type_size(arg_type, 0);
5314 
5315     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5316     if (!argptr)
5317         return -TARGET_EFAULT;
5318     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5319     unlock_user(argptr, arg, 0);
5320 
5321     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5322     target_ifc_len = host_ifconf->ifc_len;
5323     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5324 
5325     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5326     nb_ifreq = target_ifc_len / target_ifreq_size;
5327     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5328 
5329     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5330     if (outbufsz > MAX_STRUCT_SIZE) {
5331         /* We can't fit all the ifreq entries into the fixed size buffer.
5332          * Allocate one that is large enough and use it instead.
5333          */
5334         host_ifconf = malloc(outbufsz);
5335         if (!host_ifconf) {
5336             return -TARGET_ENOMEM;
5337         }
5338         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5339         free_buf = 1;
5340     }
5341     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5342 
5343     host_ifconf->ifc_len = host_ifc_len;
5344     host_ifconf->ifc_buf = host_ifc_buf;
5345 
5346     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5347     if (!is_error(ret)) {
5348         /* convert host ifc_len to target ifc_len */
5349 
5350         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5351         target_ifc_len = nb_ifreq * target_ifreq_size;
5352         host_ifconf->ifc_len = target_ifc_len;
5353 
5354         /* restore target ifc_buf */
5355 
5356         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5357 
5358         /* copy struct ifconf to target user */
5359 
5360         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5361         if (!argptr)
5362             return -TARGET_EFAULT;
5363         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5364         unlock_user(argptr, arg, target_size);
5365 
5366         /* copy ifreq[] to target user */
5367 
5368         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        if (!argptr)
            return -TARGET_EFAULT;
5369         for (i = 0; i < nb_ifreq ; i++) {
5370             thunk_convert(argptr + i * target_ifreq_size,
5371                           host_ifc_buf + i * sizeof(struct ifreq),
5372                           ifreq_arg_type, THUNK_TARGET);
5373         }
5374         unlock_user(argptr, target_ifc_buf, target_ifc_len);
5375     }
5376 
5377     if (free_buf) {
5378         free(host_ifconf);
5379     }
5380 
5381     return ret;
5382 }
5383 
5384 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5385                             int cmd, abi_long arg)
5386 {
5387     void *argptr;
5388     struct dm_ioctl *host_dm;
5389     abi_long guest_data;
5390     uint32_t guest_data_size;
5391     int target_size;
5392     const argtype *arg_type = ie->arg_type;
5393     abi_long ret;
5394     void *big_buf = NULL;
5395     char *host_data;
5396 
5397     arg_type++;
5398     target_size = thunk_type_size(arg_type, 0);
5399     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5400     if (!argptr) {
5401         ret = -TARGET_EFAULT;
5402         goto out;
5403     }
5404     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5405     unlock_user(argptr, arg, 0);
5406 
5407     /* buf_temp is too small, so fetch things into a bigger buffer */
5408     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5409     memcpy(big_buf, buf_temp, target_size);
5410     buf_temp = big_buf;
5411     host_dm = big_buf;
5412 
5413     guest_data = arg + host_dm->data_start;
5414     if ((guest_data - arg) < 0) {
5415         ret = -TARGET_EINVAL;
5416         goto out;
5417     }
5418     guest_data_size = host_dm->data_size - host_dm->data_start;
5419     host_data = (char*)host_dm + host_dm->data_start;
5420 
5421     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5422     if (!argptr) {
5423         ret = -TARGET_EFAULT;
5424         goto out;
5425     }
5426 
5427     switch (ie->host_cmd) {
5428     case DM_REMOVE_ALL:
5429     case DM_LIST_DEVICES:
5430     case DM_DEV_CREATE:
5431     case DM_DEV_REMOVE:
5432     case DM_DEV_SUSPEND:
5433     case DM_DEV_STATUS:
5434     case DM_DEV_WAIT:
5435     case DM_TABLE_STATUS:
5436     case DM_TABLE_CLEAR:
5437     case DM_TABLE_DEPS:
5438     case DM_LIST_VERSIONS:
5439         /* no input data */
5440         break;
5441     case DM_DEV_RENAME:
5442     case DM_DEV_SET_GEOMETRY:
5443         /* data contains only strings */
5444         memcpy(host_data, argptr, guest_data_size);
5445         break;
5446     case DM_TARGET_MSG:
5447         memcpy(host_data, argptr, guest_data_size);
5448         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5449         break;
5450     case DM_TABLE_LOAD:
5451     {
5452         void *gspec = argptr;
5453         void *cur_data = host_data;
5454         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5455         int spec_size = thunk_type_size(arg_type, 0);
5456         int i;
5457 
5458         for (i = 0; i < host_dm->target_count; i++) {
5459             struct dm_target_spec *spec = cur_data;
5460             uint32_t next;
5461             int slen;
5462 
5463             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5464             slen = strlen((char*)gspec + spec_size) + 1;
5465             next = spec->next;
5466             spec->next = sizeof(*spec) + slen;
5467             strcpy((char*)&spec[1], gspec + spec_size);
5468             gspec += next;
5469             cur_data += spec->next;
5470         }
5471         break;
5472     }
5473     default:
5474         ret = -TARGET_EINVAL;
5475         unlock_user(argptr, guest_data, 0);
5476         goto out;
5477     }
5478     unlock_user(argptr, guest_data, 0);
5479 
5480     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5481     if (!is_error(ret)) {
5482         guest_data = arg + host_dm->data_start;
5483         guest_data_size = host_dm->data_size - host_dm->data_start;
5484         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5485         switch (ie->host_cmd) {
5486         case DM_REMOVE_ALL:
5487         case DM_DEV_CREATE:
5488         case DM_DEV_REMOVE:
5489         case DM_DEV_RENAME:
5490         case DM_DEV_SUSPEND:
5491         case DM_DEV_STATUS:
5492         case DM_TABLE_LOAD:
5493         case DM_TABLE_CLEAR:
5494         case DM_TARGET_MSG:
5495         case DM_DEV_SET_GEOMETRY:
5496             /* no return data */
5497             break;
5498         case DM_LIST_DEVICES:
5499         {
5500             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5501             uint32_t remaining_data = guest_data_size;
5502             void *cur_data = argptr;
5503             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5504             int nl_size = 12; /* can't use thunk_size due to alignment */
5505 
5506             while (1) {
5507                 uint32_t next = nl->next;
5508                 if (next) {
5509                     nl->next = nl_size + (strlen(nl->name) + 1);
5510                 }
5511                 if (remaining_data < nl->next) {
5512                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5513                     break;
5514                 }
5515                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5516                 strcpy(cur_data + nl_size, nl->name);
5517                 cur_data += nl->next;
5518                 remaining_data -= nl->next;
5519                 if (!next) {
5520                     break;
5521                 }
5522                 nl = (void*)nl + next;
5523             }
5524             break;
5525         }
5526         case DM_DEV_WAIT:
5527         case DM_TABLE_STATUS:
5528         {
5529             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5530             void *cur_data = argptr;
5531             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5532             int spec_size = thunk_type_size(arg_type, 0);
5533             int i;
5534 
5535             for (i = 0; i < host_dm->target_count; i++) {
5536                 uint32_t next = spec->next;
5537                 int slen = strlen((char*)&spec[1]) + 1;
5538                 spec->next = (cur_data - argptr) + spec_size + slen;
5539                 if (guest_data_size < spec->next) {
5540                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5541                     break;
5542                 }
5543                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5544                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5545                 cur_data = argptr + spec->next;
5546                 spec = (void*)host_dm + host_dm->data_start + next;
5547             }
5548             break;
5549         }
5550         case DM_TABLE_DEPS:
5551         {
5552             void *hdata = (void*)host_dm + host_dm->data_start;
5553             int count = *(uint32_t*)hdata;
5554             uint64_t *hdev = hdata + 8;
5555             uint64_t *gdev = argptr + 8;
5556             int i;
5557 
5558             *(uint32_t*)argptr = tswap32(count);
5559             for (i = 0; i < count; i++) {
5560                 *gdev = tswap64(*hdev);
5561                 gdev++;
5562                 hdev++;
5563             }
5564             break;
5565         }
5566         case DM_LIST_VERSIONS:
5567         {
5568             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5569             uint32_t remaining_data = guest_data_size;
5570             void *cur_data = argptr;
5571             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5572             int vers_size = thunk_type_size(arg_type, 0);
5573 
5574             while (1) {
5575                 uint32_t next = vers->next;
5576                 if (next) {
5577                     vers->next = vers_size + (strlen(vers->name) + 1);
5578                 }
5579                 if (remaining_data < vers->next) {
5580                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5581                     break;
5582                 }
5583                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5584                 strcpy(cur_data + vers_size, vers->name);
5585                 cur_data += vers->next;
5586                 remaining_data -= vers->next;
5587                 if (!next) {
5588                     break;
5589                 }
5590                 vers = (void*)vers + next;
5591             }
5592             break;
5593         }
5594         default:
5595             unlock_user(argptr, guest_data, 0);
5596             ret = -TARGET_EINVAL;
5597             goto out;
5598         }
5599         unlock_user(argptr, guest_data, guest_data_size);
5600 
5601         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5602         if (!argptr) {
5603             ret = -TARGET_EFAULT;
5604             goto out;
5605         }
5606         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5607         unlock_user(argptr, arg, target_size);
5608     }
5609 out:
5610     g_free(big_buf);
5611     return ret;
5612 }
5613 
5614 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5615                                int cmd, abi_long arg)
5616 {
5617     void *argptr;
5618     int target_size;
5619     const argtype *arg_type = ie->arg_type;
5620     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5621     abi_long ret;
5622 
5623     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5624     struct blkpg_partition host_part;
5625 
5626     /* Read and convert blkpg */
5627     arg_type++;
5628     target_size = thunk_type_size(arg_type, 0);
5629     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5630     if (!argptr) {
5631         ret = -TARGET_EFAULT;
5632         goto out;
5633     }
5634     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5635     unlock_user(argptr, arg, 0);
5636 
5637     switch (host_blkpg->op) {
5638     case BLKPG_ADD_PARTITION:
5639     case BLKPG_DEL_PARTITION:
5640         /* payload is struct blkpg_partition */
5641         break;
5642     default:
5643         /* Unknown opcode */
5644         ret = -TARGET_EINVAL;
5645         goto out;
5646     }
5647 
5648     /* Read and convert blkpg->data */
5649     arg = (abi_long)(uintptr_t)host_blkpg->data;
5650     target_size = thunk_type_size(part_arg_type, 0);
5651     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5652     if (!argptr) {
5653         ret = -TARGET_EFAULT;
5654         goto out;
5655     }
5656     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5657     unlock_user(argptr, arg, 0);
5658 
5659     /* Swizzle the data pointer to our local copy and call! */
5660     host_blkpg->data = &host_part;
5661     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5662 
5663 out:
5664     return ret;
5665 }
5666 
5667 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5668                                 int fd, int cmd, abi_long arg)
5669 {
5670     const argtype *arg_type = ie->arg_type;
5671     const StructEntry *se;
5672     const argtype *field_types;
5673     const int *dst_offsets, *src_offsets;
5674     int target_size;
5675     void *argptr;
5676     abi_ulong *target_rt_dev_ptr = NULL;
5677     unsigned long *host_rt_dev_ptr = NULL;
5678     abi_long ret;
5679     int i;
5680 
5681     assert(ie->access == IOC_W);
5682     assert(*arg_type == TYPE_PTR);
5683     arg_type++;
5684     assert(*arg_type == TYPE_STRUCT);
5685     target_size = thunk_type_size(arg_type, 0);
5686     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5687     if (!argptr) {
5688         return -TARGET_EFAULT;
5689     }
5690     arg_type++;
5691     assert(*arg_type == (int)STRUCT_rtentry);
5692     se = struct_entries + *arg_type++;
5693     assert(se->convert[0] == NULL);
5694     /* convert struct here to be able to catch rt_dev string */
5695     field_types = se->field_types;
5696     dst_offsets = se->field_offsets[THUNK_HOST];
5697     src_offsets = se->field_offsets[THUNK_TARGET];
5698     for (i = 0; i < se->nb_fields; i++) {
5699         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5700             assert(*field_types == TYPE_PTRVOID);
5701             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5702             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5703             if (*target_rt_dev_ptr != 0) {
5704                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5705                                                   tswapal(*target_rt_dev_ptr));
5706                 if (!*host_rt_dev_ptr) {
5707                     unlock_user(argptr, arg, 0);
5708                     return -TARGET_EFAULT;
5709                 }
5710             } else {
5711                 *host_rt_dev_ptr = 0;
5712             }
5713             field_types++;
5714             continue;
5715         }
5716         field_types = thunk_convert(buf_temp + dst_offsets[i],
5717                                     argptr + src_offsets[i],
5718                                     field_types, THUNK_HOST);
5719     }
5720     unlock_user(argptr, arg, 0);
5721 
5722     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5723     if (host_rt_dev_ptr && *host_rt_dev_ptr != 0) {
5724         unlock_user((void *)*host_rt_dev_ptr,
5725                     *target_rt_dev_ptr, 0);
5726     }
5727     return ret;
5728 }
5729 
5730 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5731                                      int fd, int cmd, abi_long arg)
5732 {
5733     int sig = target_to_host_signal(arg);
5734     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5735 }
5736 
5737 #ifdef TIOCGPTPEER
5738 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5739                                      int fd, int cmd, abi_long arg)
5740 {
5741     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5742     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5743 }
5744 #endif
5745 
5746 static IOCTLEntry ioctl_entries[] = {
5747 #define IOCTL(cmd, access, ...) \
5748     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5749 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5750     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5751 #define IOCTL_IGNORE(cmd) \
5752     { TARGET_ ## cmd, 0, #cmd },
5753 #include "ioctls.h"
5754     { 0, 0, },
5755 };
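/* Illustrative expansion, assuming ioctls.h contains an entry of the form
 * IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize))): the IOCTL
 * macro above turns it into
 *
 *   { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *     { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * so do_ioctl() can match the target command number and then use arg_type
 * to convert the pointed-to structure in the appropriate direction.
 */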
5756 
5757 /* ??? Implement proper locking for ioctls.  */
5758 /* do_ioctl() must return target values and target errnos. */
5759 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5760 {
5761     const IOCTLEntry *ie;
5762     const argtype *arg_type;
5763     abi_long ret;
5764     uint8_t buf_temp[MAX_STRUCT_SIZE];
5765     int target_size;
5766     void *argptr;
5767 
5768     ie = ioctl_entries;
5769     for(;;) {
5770         if (ie->target_cmd == 0) {
5771             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5772             return -TARGET_ENOSYS;
5773         }
5774         if (ie->target_cmd == cmd)
5775             break;
5776         ie++;
5777     }
5778     arg_type = ie->arg_type;
5779 #if defined(DEBUG)
5780     gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5781 #endif
5782     if (ie->do_ioctl) {
5783         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5784     } else if (!ie->host_cmd) {
5785         /* Some architectures define BSD ioctls in their headers
5786            that are not implemented in Linux.  */
5787         return -TARGET_ENOSYS;
5788     }
5789 
5790     switch(arg_type[0]) {
5791     case TYPE_NULL:
5792         /* no argument */
5793         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5794         break;
5795     case TYPE_PTRVOID:
5796     case TYPE_INT:
5797         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5798         break;
5799     case TYPE_PTR:
5800         arg_type++;
5801         target_size = thunk_type_size(arg_type, 0);
5802         switch(ie->access) {
5803         case IOC_R:
5804             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5805             if (!is_error(ret)) {
5806                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5807                 if (!argptr)
5808                     return -TARGET_EFAULT;
5809                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5810                 unlock_user(argptr, arg, target_size);
5811             }
5812             break;
5813         case IOC_W:
5814             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5815             if (!argptr)
5816                 return -TARGET_EFAULT;
5817             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5818             unlock_user(argptr, arg, 0);
5819             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5820             break;
5821         default:
5822         case IOC_RW:
5823             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5824             if (!argptr)
5825                 return -TARGET_EFAULT;
5826             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5827             unlock_user(argptr, arg, 0);
5828             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5829             if (!is_error(ret)) {
5830                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5831                 if (!argptr)
5832                     return -TARGET_EFAULT;
5833                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5834                 unlock_user(argptr, arg, target_size);
5835             }
5836             break;
5837         }
5838         break;
5839     default:
5840         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5841                  (long)cmd, arg_type[0]);
5842         ret = -TARGET_ENOSYS;
5843         break;
5844     }
5845     return ret;
5846 }
5847 
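/* Each bitmask_transtbl row below is read as { target_mask, target_bits,
 * host_mask, host_bits }: target_to_host_bitmask() sets host_bits whenever
 * (flags & target_mask) == target_bits, and host_to_target_bitmask() works
 * the other way round.  Single-bit flags simply repeat the same value in
 * both halves; multi-bit fields such as the CRDLY entries in oflag_tbl use
 * the mask to select the field and the bits to name one of its values.
 */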
5848 static const bitmask_transtbl iflag_tbl[] = {
5849         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5850         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5851         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5852         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5853         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5854         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5855         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5856         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5857         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5858         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5859         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5860         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5861         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5862         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5863         { 0, 0, 0, 0 }
5864 };
5865 
5866 static const bitmask_transtbl oflag_tbl[] = {
5867 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5868 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5869 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5870 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5871 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5872 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5873 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5874 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5875 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5876 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5877 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5878 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5879 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5880 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5881 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5882 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5883 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5884 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5885 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5886 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5887 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5888 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5889 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5890 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5891 	{ 0, 0, 0, 0 }
5892 };
5893 
5894 static const bitmask_transtbl cflag_tbl[] = {
5895 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5896 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5897 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5898 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5899 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5900 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5901 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5902 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5903 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5904 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5905 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5906 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5907 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5908 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5909 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5910 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5911 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5912 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5913 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5914 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5915 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5916 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5917 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5918 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5919 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5920 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5921 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5922 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5923 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5924 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5925 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5926 	{ 0, 0, 0, 0 }
5927 };
5928 
5929 static const bitmask_transtbl lflag_tbl[] = {
5930 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5931 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5932 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5933 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5934 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5935 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5936 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5937 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5938 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5939 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5940 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5941 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5942 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5943 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5944 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5945 	{ 0, 0, 0, 0 }
5946 };
5947 
5948 static void target_to_host_termios (void *dst, const void *src)
5949 {
5950     struct host_termios *host = dst;
5951     const struct target_termios *target = src;
5952 
5953     host->c_iflag =
5954         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5955     host->c_oflag =
5956         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5957     host->c_cflag =
5958         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5959     host->c_lflag =
5960         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5961     host->c_line = target->c_line;
5962 
5963     memset(host->c_cc, 0, sizeof(host->c_cc));
5964     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5965     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5966     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5967     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5968     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5969     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5970     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5971     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5972     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5973     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5974     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5975     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5976     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5977     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5978     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5979     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5980     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5981 }
5982 
5983 static void host_to_target_termios (void *dst, const void *src)
5984 {
5985     struct target_termios *target = dst;
5986     const struct host_termios *host = src;
5987 
5988     target->c_iflag =
5989         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5990     target->c_oflag =
5991         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5992     target->c_cflag =
5993         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5994     target->c_lflag =
5995         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5996     target->c_line = host->c_line;
5997 
5998     memset(target->c_cc, 0, sizeof(target->c_cc));
5999     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6000     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6001     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6002     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6003     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6004     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6005     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6006     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6007     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6008     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6009     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6010     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6011     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6012     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6013     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6014     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6015     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6016 }
6017 
6018 static const StructEntry struct_termios_def = {
6019     .convert = { host_to_target_termios, target_to_host_termios },
6020     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6021     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6022 };
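/* The termios flag fields need the bitmask translation above rather than a
 * plain field-by-field copy, so this StructEntry supplies explicit .convert
 * callbacks; thunk_convert() picks the appropriate callback (and the
 * matching .size/.align slot) depending on whether it is converting towards
 * the target or towards the host.
 */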
6023 
6024 static bitmask_transtbl mmap_flags_tbl[] = {
6025     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6026     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6027     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6028     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6029       MAP_ANONYMOUS, MAP_ANONYMOUS },
6030     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6031       MAP_GROWSDOWN, MAP_GROWSDOWN },
6032     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6033       MAP_DENYWRITE, MAP_DENYWRITE },
6034     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6035       MAP_EXECUTABLE, MAP_EXECUTABLE },
6036     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6037     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6038       MAP_NORESERVE, MAP_NORESERVE },
6039     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6040     /* MAP_STACK had been ignored by the kernel for quite some time.
6041        Recognize it for the target insofar as we do not want to pass
6042        it through to the host.  */
6043     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6044     { 0, 0, 0, 0 }
6045 };
6046 
6047 #if defined(TARGET_I386)
6048 
6049 /* NOTE: there is really only one LDT for all the threads */
6050 static uint8_t *ldt_table;
6051 
6052 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6053 {
6054     int size;
6055     void *p;
6056 
6057     if (!ldt_table)
6058         return 0;
6059     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6060     if (size > bytecount)
6061         size = bytecount;
6062     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6063     if (!p)
6064         return -TARGET_EFAULT;
6065     /* ??? Should this be byteswapped?  */
6066     memcpy(p, ldt_table, size);
6067     unlock_user(p, ptr, size);
6068     return size;
6069 }
6070 
6071 /* XXX: add locking support */
6072 static abi_long write_ldt(CPUX86State *env,
6073                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6074 {
6075     struct target_modify_ldt_ldt_s ldt_info;
6076     struct target_modify_ldt_ldt_s *target_ldt_info;
6077     int seg_32bit, contents, read_exec_only, limit_in_pages;
6078     int seg_not_present, useable, lm;
6079     uint32_t *lp, entry_1, entry_2;
6080 
6081     if (bytecount != sizeof(ldt_info))
6082         return -TARGET_EINVAL;
6083     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6084         return -TARGET_EFAULT;
6085     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6086     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6087     ldt_info.limit = tswap32(target_ldt_info->limit);
6088     ldt_info.flags = tswap32(target_ldt_info->flags);
6089     unlock_user_struct(target_ldt_info, ptr, 0);
6090 
6091     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6092         return -TARGET_EINVAL;
6093     seg_32bit = ldt_info.flags & 1;
6094     contents = (ldt_info.flags >> 1) & 3;
6095     read_exec_only = (ldt_info.flags >> 3) & 1;
6096     limit_in_pages = (ldt_info.flags >> 4) & 1;
6097     seg_not_present = (ldt_info.flags >> 5) & 1;
6098     useable = (ldt_info.flags >> 6) & 1;
6099 #ifdef TARGET_ABI32
6100     lm = 0;
6101 #else
6102     lm = (ldt_info.flags >> 7) & 1;
6103 #endif
6104     if (contents == 3) {
6105         if (oldmode)
6106             return -TARGET_EINVAL;
6107         if (seg_not_present == 0)
6108             return -TARGET_EINVAL;
6109     }
6110     /* allocate the LDT */
6111     if (!ldt_table) {
6112         env->ldt.base = target_mmap(0,
6113                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6114                                     PROT_READ|PROT_WRITE,
6115                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6116         if (env->ldt.base == -1)
6117             return -TARGET_ENOMEM;
6118         memset(g2h(env->ldt.base), 0,
6119                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6120         env->ldt.limit = 0xffff;
6121         ldt_table = g2h(env->ldt.base);
6122     }
6123 
6124     /* NOTE: same code as Linux kernel */
6125     /* Allow LDTs to be cleared by the user. */
6126     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6127         if (oldmode ||
6128             (contents == 0		&&
6129              read_exec_only == 1	&&
6130              seg_32bit == 0		&&
6131              limit_in_pages == 0	&&
6132              seg_not_present == 1	&&
6133              useable == 0 )) {
6134             entry_1 = 0;
6135             entry_2 = 0;
6136             goto install;
6137         }
6138     }
6139 
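    /* Pack the descriptor the same way the kernel does (see the NOTE
     * above): entry_1 holds limit bits 0-15 and base bits 0-15, while
     * entry_2 holds the remaining base and limit bits together with the
     * access and flag bits derived from ldt_info.flags.
     */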
6140     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6141         (ldt_info.limit & 0x0ffff);
6142     entry_2 = (ldt_info.base_addr & 0xff000000) |
6143         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6144         (ldt_info.limit & 0xf0000) |
6145         ((read_exec_only ^ 1) << 9) |
6146         (contents << 10) |
6147         ((seg_not_present ^ 1) << 15) |
6148         (seg_32bit << 22) |
6149         (limit_in_pages << 23) |
6150         (lm << 21) |
6151         0x7000;
6152     if (!oldmode)
6153         entry_2 |= (useable << 20);
6154 
6155     /* Install the new entry ...  */
6156 install:
6157     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6158     lp[0] = tswap32(entry_1);
6159     lp[1] = tswap32(entry_2);
6160     return 0;
6161 }
6162 
6163 /* specific and weird i386 syscalls */
6164 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6165                               unsigned long bytecount)
6166 {
6167     abi_long ret;
6168 
6169     switch (func) {
6170     case 0:
6171         ret = read_ldt(ptr, bytecount);
6172         break;
6173     case 1:
6174         ret = write_ldt(env, ptr, bytecount, 1);
6175         break;
6176     case 0x11:
6177         ret = write_ldt(env, ptr, bytecount, 0);
6178         break;
6179     default:
6180         ret = -TARGET_ENOSYS;
6181         break;
6182     }
6183     return ret;
6184 }
6185 
6186 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6187 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6188 {
6189     uint64_t *gdt_table = g2h(env->gdt.base);
6190     struct target_modify_ldt_ldt_s ldt_info;
6191     struct target_modify_ldt_ldt_s *target_ldt_info;
6192     int seg_32bit, contents, read_exec_only, limit_in_pages;
6193     int seg_not_present, useable, lm;
6194     uint32_t *lp, entry_1, entry_2;
6195     int i;
6196 
6197     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6198     if (!target_ldt_info)
6199         return -TARGET_EFAULT;
6200     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6201     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6202     ldt_info.limit = tswap32(target_ldt_info->limit);
6203     ldt_info.flags = tswap32(target_ldt_info->flags);
6204     if (ldt_info.entry_number == -1) {
6205         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6206             if (gdt_table[i] == 0) {
6207                 ldt_info.entry_number = i;
6208                 target_ldt_info->entry_number = tswap32(i);
6209                 break;
6210             }
6211         }
6212     }
6213     unlock_user_struct(target_ldt_info, ptr, 1);
6214 
6215     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6216         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6217            return -TARGET_EINVAL;
6218     seg_32bit = ldt_info.flags & 1;
6219     contents = (ldt_info.flags >> 1) & 3;
6220     read_exec_only = (ldt_info.flags >> 3) & 1;
6221     limit_in_pages = (ldt_info.flags >> 4) & 1;
6222     seg_not_present = (ldt_info.flags >> 5) & 1;
6223     useable = (ldt_info.flags >> 6) & 1;
6224 #ifdef TARGET_ABI32
6225     lm = 0;
6226 #else
6227     lm = (ldt_info.flags >> 7) & 1;
6228 #endif
6229 
6230     if (contents == 3) {
6231         if (seg_not_present == 0)
6232             return -TARGET_EINVAL;
6233     }
6234 
6235     /* NOTE: same code as Linux kernel */
6236     /* Allow LDTs to be cleared by the user. */
6237     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6238         if ((contents == 0             &&
6239              read_exec_only == 1       &&
6240              seg_32bit == 0            &&
6241              limit_in_pages == 0       &&
6242              seg_not_present == 1      &&
6243              useable == 0 )) {
6244             entry_1 = 0;
6245             entry_2 = 0;
6246             goto install;
6247         }
6248     }
6249 
6250     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6251         (ldt_info.limit & 0x0ffff);
6252     entry_2 = (ldt_info.base_addr & 0xff000000) |
6253         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6254         (ldt_info.limit & 0xf0000) |
6255         ((read_exec_only ^ 1) << 9) |
6256         (contents << 10) |
6257         ((seg_not_present ^ 1) << 15) |
6258         (seg_32bit << 22) |
6259         (limit_in_pages << 23) |
6260         (useable << 20) |
6261         (lm << 21) |
6262         0x7000;
6263 
6264     /* Install the new entry ...  */
6265 install:
6266     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6267     lp[0] = tswap32(entry_1);
6268     lp[1] = tswap32(entry_2);
6269     return 0;
6270 }
6271 
6272 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6273 {
6274     struct target_modify_ldt_ldt_s *target_ldt_info;
6275     uint64_t *gdt_table = g2h(env->gdt.base);
6276     uint32_t base_addr, limit, flags;
6277     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6278     int seg_not_present, useable, lm;
6279     uint32_t *lp, entry_1, entry_2;
6280 
6281     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6282     if (!target_ldt_info)
6283         return -TARGET_EFAULT;
6284     idx = tswap32(target_ldt_info->entry_number);
6285     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6286         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6287         unlock_user_struct(target_ldt_info, ptr, 1);
6288         return -TARGET_EINVAL;
6289     }
6290     lp = (uint32_t *)(gdt_table + idx);
6291     entry_1 = tswap32(lp[0]);
6292     entry_2 = tswap32(lp[1]);
6293 
6294     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6295     contents = (entry_2 >> 10) & 3;
6296     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6297     seg_32bit = (entry_2 >> 22) & 1;
6298     limit_in_pages = (entry_2 >> 23) & 1;
6299     useable = (entry_2 >> 20) & 1;
6300 #ifdef TARGET_ABI32
6301     lm = 0;
6302 #else
6303     lm = (entry_2 >> 21) & 1;
6304 #endif
6305     flags = (seg_32bit << 0) | (contents << 1) |
6306         (read_exec_only << 3) | (limit_in_pages << 4) |
6307         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6308     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6309     base_addr = (entry_1 >> 16) |
6310         (entry_2 & 0xff000000) |
6311         ((entry_2 & 0xff) << 16);
6312     target_ldt_info->base_addr = tswapal(base_addr);
6313     target_ldt_info->limit = tswap32(limit);
6314     target_ldt_info->flags = tswap32(flags);
6315     unlock_user_struct(target_ldt_info, ptr, 1);
6316     return 0;
6317 }
6318 #endif /* TARGET_I386 && TARGET_ABI32 */
6319 
6320 #ifndef TARGET_ABI32
6321 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6322 {
6323     abi_long ret = 0;
6324     abi_ulong val;
6325     int idx;
6326 
6327     switch(code) {
6328     case TARGET_ARCH_SET_GS:
6329     case TARGET_ARCH_SET_FS:
6330         if (code == TARGET_ARCH_SET_GS)
6331             idx = R_GS;
6332         else
6333             idx = R_FS;
6334         cpu_x86_load_seg(env, idx, 0);
6335         env->segs[idx].base = addr;
6336         break;
6337     case TARGET_ARCH_GET_GS:
6338     case TARGET_ARCH_GET_FS:
6339         if (code == TARGET_ARCH_GET_GS)
6340             idx = R_GS;
6341         else
6342             idx = R_FS;
6343         val = env->segs[idx].base;
6344         if (put_user(val, addr, abi_ulong))
6345             ret = -TARGET_EFAULT;
6346         break;
6347     default:
6348         ret = -TARGET_EINVAL;
6349         break;
6350     }
6351     return ret;
6352 }
6353 #endif
6354 
6355 #endif /* defined(TARGET_I386) */
6356 
6357 #define NEW_STACK_SIZE 0x40000
6358 
6359 
6360 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6361 typedef struct {
6362     CPUArchState *env;
6363     pthread_mutex_t mutex;
6364     pthread_cond_t cond;
6365     pthread_t thread;
6366     uint32_t tid;
6367     abi_ulong child_tidptr;
6368     abi_ulong parent_tidptr;
6369     sigset_t sigmask;
6370 } new_thread_info;
6371 
6372 static void *clone_func(void *arg)
6373 {
6374     new_thread_info *info = arg;
6375     CPUArchState *env;
6376     CPUState *cpu;
6377     TaskState *ts;
6378 
6379     rcu_register_thread();
6380     tcg_register_thread();
6381     env = info->env;
6382     cpu = ENV_GET_CPU(env);
6383     thread_cpu = cpu;
6384     ts = (TaskState *)cpu->opaque;
6385     info->tid = gettid();
6386     task_settid(ts);
6387     if (info->child_tidptr)
6388         put_user_u32(info->tid, info->child_tidptr);
6389     if (info->parent_tidptr)
6390         put_user_u32(info->tid, info->parent_tidptr);
6391     /* Enable signals.  */
6392     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6393     /* Signal to the parent that we're ready.  */
6394     pthread_mutex_lock(&info->mutex);
6395     pthread_cond_broadcast(&info->cond);
6396     pthread_mutex_unlock(&info->mutex);
6397     /* Wait until the parent has finished initializing the tls state.  */
6398     pthread_mutex_lock(&clone_lock);
6399     pthread_mutex_unlock(&clone_lock);
6400     cpu_loop(env);
6401     /* never exits */
6402     return NULL;
6403 }
6404 
6405 /* do_fork() Must return host values and target errnos (unlike most
6406    do_*() functions). */
6407 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6408                    abi_ulong parent_tidptr, target_ulong newtls,
6409                    abi_ulong child_tidptr)
6410 {
6411     CPUState *cpu = ENV_GET_CPU(env);
6412     int ret;
6413     TaskState *ts;
6414     CPUState *new_cpu;
6415     CPUArchState *new_env;
6416     sigset_t sigmask;
6417 
6418     flags &= ~CLONE_IGNORED_FLAGS;
6419 
6420     /* Emulate vfork() with fork() */
6421     if (flags & CLONE_VFORK)
6422         flags &= ~(CLONE_VFORK | CLONE_VM);
6423 
6424     if (flags & CLONE_VM) {
6425         TaskState *parent_ts = (TaskState *)cpu->opaque;
6426         new_thread_info info;
6427         pthread_attr_t attr;
6428 
6429         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6430             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6431             return -TARGET_EINVAL;
6432         }
6433 
6434         ts = g_new0(TaskState, 1);
6435         init_task_state(ts);
6436 
6437         /* Grab a mutex so that thread setup appears atomic.  */
6438         pthread_mutex_lock(&clone_lock);
6439 
6440         /* we create a new CPU instance. */
6441         new_env = cpu_copy(env);
6442         /* Init regs that differ from the parent.  */
6443         cpu_clone_regs(new_env, newsp);
6444         new_cpu = ENV_GET_CPU(new_env);
6445         new_cpu->opaque = ts;
6446         ts->bprm = parent_ts->bprm;
6447         ts->info = parent_ts->info;
6448         ts->signal_mask = parent_ts->signal_mask;
6449 
6450         if (flags & CLONE_CHILD_CLEARTID) {
6451             ts->child_tidptr = child_tidptr;
6452         }
6453 
6454         if (flags & CLONE_SETTLS) {
6455             cpu_set_tls (new_env, newtls);
6456         }
6457 
6458         memset(&info, 0, sizeof(info));
6459         pthread_mutex_init(&info.mutex, NULL);
6460         pthread_mutex_lock(&info.mutex);
6461         pthread_cond_init(&info.cond, NULL);
6462         info.env = new_env;
6463         if (flags & CLONE_CHILD_SETTID) {
6464             info.child_tidptr = child_tidptr;
6465         }
6466         if (flags & CLONE_PARENT_SETTID) {
6467             info.parent_tidptr = parent_tidptr;
6468         }
6469 
6470         ret = pthread_attr_init(&attr);
6471         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6472         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6473         /* It is not safe to deliver signals until the child has finished
6474            initializing, so temporarily block all signals.  */
6475         sigfillset(&sigmask);
6476         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6477 
6478         /* If this is our first additional thread, we need to ensure we
6479          * generate code for parallel execution and flush old translations.
6480          */
6481         if (!parallel_cpus) {
6482             parallel_cpus = true;
6483             tb_flush(cpu);
6484         }
6485 
6486         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6487         /* TODO: Free new CPU state if thread creation failed.  */
6488 
6489         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6490         pthread_attr_destroy(&attr);
6491         if (ret == 0) {
6492             /* Wait for the child to initialize.  */
6493             pthread_cond_wait(&info.cond, &info.mutex);
6494             ret = info.tid;
6495         } else {
6496             ret = -1;
6497         }
6498         pthread_mutex_unlock(&info.mutex);
6499         pthread_cond_destroy(&info.cond);
6500         pthread_mutex_destroy(&info.mutex);
6501         pthread_mutex_unlock(&clone_lock);
6502     } else {
6503         /* if no CLONE_VM, we consider it is a fork */
6504         if (flags & CLONE_INVALID_FORK_FLAGS) {
6505             return -TARGET_EINVAL;
6506         }
6507 
6508         /* We can't support custom termination signals */
6509         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6510             return -TARGET_EINVAL;
6511         }
6512 
6513         if (block_signals()) {
6514             return -TARGET_ERESTARTSYS;
6515         }
6516 
6517         fork_start();
6518         ret = fork();
6519         if (ret == 0) {
6520             /* Child Process.  */
6521             cpu_clone_regs(env, newsp);
6522             fork_end(1);
6523             /* There is a race condition here.  The parent process could
6524                theoretically read the TID in the child process before the child
6525                tid is set.  This would require using either ptrace
6526                (not implemented) or having *_tidptr to point at a shared memory
6527                mapping.  We can't repeat the spinlock hack used above because
6528                the child process gets its own copy of the lock.  */
6529             if (flags & CLONE_CHILD_SETTID)
6530                 put_user_u32(gettid(), child_tidptr);
6531             if (flags & CLONE_PARENT_SETTID)
6532                 put_user_u32(gettid(), parent_tidptr);
6533             ts = (TaskState *)cpu->opaque;
6534             if (flags & CLONE_SETTLS)
6535                 cpu_set_tls (env, newtls);
6536             if (flags & CLONE_CHILD_CLEARTID)
6537                 ts->child_tidptr = child_tidptr;
6538         } else {
6539             fork_end(0);
6540         }
6541     }
6542     return ret;
6543 }
6544 
6545 /* warning : doesn't handle linux specific flags... */
6546 static int target_to_host_fcntl_cmd(int cmd)
6547 {
6548     switch(cmd) {
6549 	case TARGET_F_DUPFD:
6550 	case TARGET_F_GETFD:
6551 	case TARGET_F_SETFD:
6552 	case TARGET_F_GETFL:
6553 	case TARGET_F_SETFL:
6554             return cmd;
6555         case TARGET_F_GETLK:
6556             return F_GETLK64;
6557         case TARGET_F_SETLK:
6558             return F_SETLK64;
6559         case TARGET_F_SETLKW:
6560             return F_SETLKW64;
6561 	case TARGET_F_GETOWN:
6562 	    return F_GETOWN;
6563 	case TARGET_F_SETOWN:
6564 	    return F_SETOWN;
6565 	case TARGET_F_GETSIG:
6566 	    return F_GETSIG;
6567 	case TARGET_F_SETSIG:
6568 	    return F_SETSIG;
6569 #if TARGET_ABI_BITS == 32
6570         case TARGET_F_GETLK64:
6571 	    return F_GETLK64;
6572 	case TARGET_F_SETLK64:
6573 	    return F_SETLK64;
6574 	case TARGET_F_SETLKW64:
6575 	    return F_SETLKW64;
6576 #endif
6577         case TARGET_F_SETLEASE:
6578             return F_SETLEASE;
6579         case TARGET_F_GETLEASE:
6580             return F_GETLEASE;
6581 #ifdef F_DUPFD_CLOEXEC
6582         case TARGET_F_DUPFD_CLOEXEC:
6583             return F_DUPFD_CLOEXEC;
6584 #endif
6585         case TARGET_F_NOTIFY:
6586             return F_NOTIFY;
6587 #ifdef F_GETOWN_EX
6588 	case TARGET_F_GETOWN_EX:
6589 	    return F_GETOWN_EX;
6590 #endif
6591 #ifdef F_SETOWN_EX
6592 	case TARGET_F_SETOWN_EX:
6593 	    return F_SETOWN_EX;
6594 #endif
6595 #ifdef F_SETPIPE_SZ
6596         case TARGET_F_SETPIPE_SZ:
6597             return F_SETPIPE_SZ;
6598         case TARGET_F_GETPIPE_SZ:
6599             return F_GETPIPE_SZ;
6600 #endif
6601 	default:
6602             return -TARGET_EINVAL;
6603     }
6604     return -TARGET_EINVAL;
6605 }
6606 
6607 #define FLOCK_TRANSTBL \
6608     switch (type) { \
6609     TRANSTBL_CONVERT(F_RDLCK); \
6610     TRANSTBL_CONVERT(F_WRLCK); \
6611     TRANSTBL_CONVERT(F_UNLCK); \
6612     TRANSTBL_CONVERT(F_EXLCK); \
6613     TRANSTBL_CONVERT(F_SHLCK); \
6614     }
6615 
6616 static int target_to_host_flock(int type)
6617 {
6618 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6619     FLOCK_TRANSTBL
6620 #undef  TRANSTBL_CONVERT
6621     return -TARGET_EINVAL;
6622 }
6623 
6624 static int host_to_target_flock(int type)
6625 {
6626 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6627     FLOCK_TRANSTBL
6628 #undef  TRANSTBL_CONVERT
6629     /* if we don't know how to convert the value coming
6630      * from the host we copy to the target field as-is
6631      */
6632     return type;
6633 }
6634 
6635 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6636                                             abi_ulong target_flock_addr)
6637 {
6638     struct target_flock *target_fl;
6639     int l_type;
6640 
6641     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6642         return -TARGET_EFAULT;
6643     }
6644 
6645     __get_user(l_type, &target_fl->l_type);
6646     l_type = target_to_host_flock(l_type);
6647     if (l_type < 0) {
6648         return l_type;
6649     }
6650     fl->l_type = l_type;
6651     __get_user(fl->l_whence, &target_fl->l_whence);
6652     __get_user(fl->l_start, &target_fl->l_start);
6653     __get_user(fl->l_len, &target_fl->l_len);
6654     __get_user(fl->l_pid, &target_fl->l_pid);
6655     unlock_user_struct(target_fl, target_flock_addr, 0);
6656     return 0;
6657 }
6658 
6659 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6660                                           const struct flock64 *fl)
6661 {
6662     struct target_flock *target_fl;
6663     short l_type;
6664 
6665     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6666         return -TARGET_EFAULT;
6667     }
6668 
6669     l_type = host_to_target_flock(fl->l_type);
6670     __put_user(l_type, &target_fl->l_type);
6671     __put_user(fl->l_whence, &target_fl->l_whence);
6672     __put_user(fl->l_start, &target_fl->l_start);
6673     __put_user(fl->l_len, &target_fl->l_len);
6674     __put_user(fl->l_pid, &target_fl->l_pid);
6675     unlock_user_struct(target_fl, target_flock_addr, 1);
6676     return 0;
6677 }
6678 
6679 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6680 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6681 
6682 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6683 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6684                                                    abi_ulong target_flock_addr)
6685 {
6686     struct target_oabi_flock64 *target_fl;
6687     int l_type;
6688 
6689     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6690         return -TARGET_EFAULT;
6691     }
6692 
6693     __get_user(l_type, &target_fl->l_type);
6694     l_type = target_to_host_flock(l_type);
6695     if (l_type < 0) {
6696         return l_type;
6697     }
6698     fl->l_type = l_type;
6699     __get_user(fl->l_whence, &target_fl->l_whence);
6700     __get_user(fl->l_start, &target_fl->l_start);
6701     __get_user(fl->l_len, &target_fl->l_len);
6702     __get_user(fl->l_pid, &target_fl->l_pid);
6703     unlock_user_struct(target_fl, target_flock_addr, 0);
6704     return 0;
6705 }
6706 
6707 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6708                                                  const struct flock64 *fl)
6709 {
6710     struct target_oabi_flock64 *target_fl;
6711     short l_type;
6712 
6713     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6714         return -TARGET_EFAULT;
6715     }
6716 
6717     l_type = host_to_target_flock(fl->l_type);
6718     __put_user(l_type, &target_fl->l_type);
6719     __put_user(fl->l_whence, &target_fl->l_whence);
6720     __put_user(fl->l_start, &target_fl->l_start);
6721     __put_user(fl->l_len, &target_fl->l_len);
6722     __put_user(fl->l_pid, &target_fl->l_pid);
6723     unlock_user_struct(target_fl, target_flock_addr, 1);
6724     return 0;
6725 }
6726 #endif
6727 
6728 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6729                                               abi_ulong target_flock_addr)
6730 {
6731     struct target_flock64 *target_fl;
6732     int l_type;
6733 
6734     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6735         return -TARGET_EFAULT;
6736     }
6737 
6738     __get_user(l_type, &target_fl->l_type);
6739     l_type = target_to_host_flock(l_type);
6740     if (l_type < 0) {
6741         return l_type;
6742     }
6743     fl->l_type = l_type;
6744     __get_user(fl->l_whence, &target_fl->l_whence);
6745     __get_user(fl->l_start, &target_fl->l_start);
6746     __get_user(fl->l_len, &target_fl->l_len);
6747     __get_user(fl->l_pid, &target_fl->l_pid);
6748     unlock_user_struct(target_fl, target_flock_addr, 0);
6749     return 0;
6750 }
6751 
6752 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6753                                             const struct flock64 *fl)
6754 {
6755     struct target_flock64 *target_fl;
6756     short l_type;
6757 
6758     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6759         return -TARGET_EFAULT;
6760     }
6761 
6762     l_type = host_to_target_flock(fl->l_type);
6763     __put_user(l_type, &target_fl->l_type);
6764     __put_user(fl->l_whence, &target_fl->l_whence);
6765     __put_user(fl->l_start, &target_fl->l_start);
6766     __put_user(fl->l_len, &target_fl->l_len);
6767     __put_user(fl->l_pid, &target_fl->l_pid);
6768     unlock_user_struct(target_fl, target_flock_addr, 1);
6769     return 0;
6770 }
6771 
6772 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6773 {
6774     struct flock64 fl64;
6775 #ifdef F_GETOWN_EX
6776     struct f_owner_ex fox;
6777     struct target_f_owner_ex *target_fox;
6778 #endif
6779     abi_long ret;
6780     int host_cmd = target_to_host_fcntl_cmd(cmd);
6781 
6782     if (host_cmd == -TARGET_EINVAL)
6783 	    return host_cmd;
6784 
6785     switch(cmd) {
6786     case TARGET_F_GETLK:
6787         ret = copy_from_user_flock(&fl64, arg);
6788         if (ret) {
6789             return ret;
6790         }
6791         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6792         if (ret == 0) {
6793             ret = copy_to_user_flock(arg, &fl64);
6794         }
6795         break;
6796 
6797     case TARGET_F_SETLK:
6798     case TARGET_F_SETLKW:
6799         ret = copy_from_user_flock(&fl64, arg);
6800         if (ret) {
6801             return ret;
6802         }
6803         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6804         break;
6805 
6806     case TARGET_F_GETLK64:
6807         ret = copy_from_user_flock64(&fl64, arg);
6808         if (ret) {
6809             return ret;
6810         }
6811         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6812         if (ret == 0) {
6813             ret = copy_to_user_flock64(arg, &fl64);
6814         }
6815         break;
6816     case TARGET_F_SETLK64:
6817     case TARGET_F_SETLKW64:
6818         ret = copy_from_user_flock64(&fl64, arg);
6819         if (ret) {
6820             return ret;
6821         }
6822         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6823         break;
6824 
6825     case TARGET_F_GETFL:
6826         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6827         if (ret >= 0) {
6828             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6829         }
6830         break;
6831 
6832     case TARGET_F_SETFL:
6833         ret = get_errno(safe_fcntl(fd, host_cmd,
6834                                    target_to_host_bitmask(arg,
6835                                                           fcntl_flags_tbl)));
6836         break;
6837 
6838 #ifdef F_GETOWN_EX
6839     case TARGET_F_GETOWN_EX:
6840         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6841         if (ret >= 0) {
6842             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6843                 return -TARGET_EFAULT;
6844             target_fox->type = tswap32(fox.type);
6845             target_fox->pid = tswap32(fox.pid);
6846             unlock_user_struct(target_fox, arg, 1);
6847         }
6848         break;
6849 #endif
6850 
6851 #ifdef F_SETOWN_EX
6852     case TARGET_F_SETOWN_EX:
6853         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6854             return -TARGET_EFAULT;
6855         fox.type = tswap32(target_fox->type);
6856         fox.pid = tswap32(target_fox->pid);
6857         unlock_user_struct(target_fox, arg, 0);
6858         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6859         break;
6860 #endif
6861 
6862     case TARGET_F_SETOWN:
6863     case TARGET_F_GETOWN:
6864     case TARGET_F_SETSIG:
6865     case TARGET_F_GETSIG:
6866     case TARGET_F_SETLEASE:
6867     case TARGET_F_GETLEASE:
6868     case TARGET_F_SETPIPE_SZ:
6869     case TARGET_F_GETPIPE_SZ:
6870         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6871         break;
6872 
6873     default:
6874         ret = get_errno(safe_fcntl(fd, cmd, arg));
6875         break;
6876     }
6877     return ret;
6878 }
6879 
6880 #ifdef USE_UID16
6881 
6882 static inline int high2lowuid(int uid)
6883 {
6884     if (uid > 65535)
6885         return 65534;
6886     else
6887         return uid;
6888 }
6889 
6890 static inline int high2lowgid(int gid)
6891 {
6892     if (gid > 65535)
6893         return 65534;
6894     else
6895         return gid;
6896 }
6897 
6898 static inline int low2highuid(int uid)
6899 {
6900     if ((int16_t)uid == -1)
6901         return -1;
6902     else
6903         return uid;
6904 }
6905 
6906 static inline int low2highgid(int gid)
6907 {
6908     if ((int16_t)gid == -1)
6909         return -1;
6910     else
6911         return gid;
6912 }
6913 static inline int tswapid(int id)
6914 {
6915     return tswap16(id);
6916 }
6917 
6918 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6919 
6920 #else /* !USE_UID16 */
6921 static inline int high2lowuid(int uid)
6922 {
6923     return uid;
6924 }
6925 static inline int high2lowgid(int gid)
6926 {
6927     return gid;
6928 }
6929 static inline int low2highuid(int uid)
6930 {
6931     return uid;
6932 }
6933 static inline int low2highgid(int gid)
6934 {
6935     return gid;
6936 }
6937 static inline int tswapid(int id)
6938 {
6939     return tswap32(id);
6940 }
6941 
6942 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6943 
6944 #endif /* USE_UID16 */
6945 
6946 /* We must do direct syscalls for setting UID/GID, because we want to
6947  * implement the Linux system call semantics of "change only for this thread",
6948  * not the libc/POSIX semantics of "change for all threads in process".
6949  * (See http://ewontfix.com/17/ for more details.)
6950  * We use the 32-bit version of the syscalls if present; if it is not
6951  * then either the host architecture supports 32-bit UIDs natively with
6952  * the standard syscall, or the 16-bit UID is the best we can do.
6953  */
6954 #ifdef __NR_setuid32
6955 #define __NR_sys_setuid __NR_setuid32
6956 #else
6957 #define __NR_sys_setuid __NR_setuid
6958 #endif
6959 #ifdef __NR_setgid32
6960 #define __NR_sys_setgid __NR_setgid32
6961 #else
6962 #define __NR_sys_setgid __NR_setgid
6963 #endif
6964 #ifdef __NR_setresuid32
6965 #define __NR_sys_setresuid __NR_setresuid32
6966 #else
6967 #define __NR_sys_setresuid __NR_setresuid
6968 #endif
6969 #ifdef __NR_setresgid32
6970 #define __NR_sys_setresgid __NR_setresgid32
6971 #else
6972 #define __NR_sys_setresgid __NR_setresgid
6973 #endif
6974 
6975 _syscall1(int, sys_setuid, uid_t, uid)
6976 _syscall1(int, sys_setgid, gid_t, gid)
6977 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6978 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6979 
6980 void syscall_init(void)
6981 {
6982     IOCTLEntry *ie;
6983     const argtype *arg_type;
6984     int size;
6985     int i;
6986 
6987     thunk_init(STRUCT_MAX);
6988 
6989 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6990 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6991 #include "syscall_types.h"
6992 #undef STRUCT
6993 #undef STRUCT_SPECIAL
6994 
6995     /* Build target_to_host_errno_table[] table from
6996      * host_to_target_errno_table[]. */
6997     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6998         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6999     }
7000 
7001     /* we patch the ioctl size if necessary. We rely on the fact that
7002        no ioctl has all the bits at '1' in the size field */
7003     ie = ioctl_entries;
7004     while (ie->target_cmd != 0) {
7005         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7006             TARGET_IOC_SIZEMASK) {
7007             arg_type = ie->arg_type;
7008             if (arg_type[0] != TYPE_PTR) {
7009                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7010                         ie->target_cmd);
7011                 exit(1);
7012             }
7013             arg_type++;
7014             size = thunk_type_size(arg_type, 0);
7015             ie->target_cmd = (ie->target_cmd &
7016                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7017                 (size << TARGET_IOC_SIZESHIFT);
7018         }
7019 
7020         /* automatic consistency check if same arch */
7021 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7022     (defined(__x86_64__) && defined(TARGET_X86_64))
7023         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7024             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7025                     ie->name, ie->target_cmd, ie->host_cmd);
7026         }
7027 #endif
7028         ie++;
7029     }
7030 }
7031 
7032 #if TARGET_ABI_BITS == 32
7033 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
7034 {
7035 #ifdef TARGET_WORDS_BIGENDIAN
7036     return ((uint64_t)word0 << 32) | word1;
7037 #else
7038     return ((uint64_t)word1 << 32) | word0;
7039 #endif
7040 }
7041 #else /* TARGET_ABI_BITS == 32 */
7042 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
7043 {
7044     return word0;
7045 }
7046 #endif /* TARGET_ABI_BITS != 32 */
7047 
7048 #ifdef TARGET_NR_truncate64
7049 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7050                                          abi_long arg2,
7051                                          abi_long arg3,
7052                                          abi_long arg4)
7053 {
7054     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7055         arg2 = arg3;
7056         arg3 = arg4;
7057     }
7058     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7059 }
7060 #endif
7061 
7062 #ifdef TARGET_NR_ftruncate64
7063 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7064                                           abi_long arg2,
7065                                           abi_long arg3,
7066                                           abi_long arg4)
7067 {
7068     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7069         arg2 = arg3;
7070         arg3 = arg4;
7071     }
7072     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7073 }
7074 #endif
7075 
7076 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7077                                                abi_ulong target_addr)
7078 {
7079     struct target_timespec *target_ts;
7080 
7081     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7082         return -TARGET_EFAULT;
7083     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7084     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7085     unlock_user_struct(target_ts, target_addr, 0);
7086     return 0;
7087 }
7088 
7089 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7090                                                struct timespec *host_ts)
7091 {
7092     struct target_timespec *target_ts;
7093 
7094     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7095         return -TARGET_EFAULT;
7096     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7097     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7098     unlock_user_struct(target_ts, target_addr, 1);
7099     return 0;
7100 }
7101 
7102 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7103                                                  abi_ulong target_addr)
7104 {
7105     struct target_itimerspec *target_itspec;
7106 
7107     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7108         return -TARGET_EFAULT;
7109     }
7110 
7111     host_itspec->it_interval.tv_sec =
7112                             tswapal(target_itspec->it_interval.tv_sec);
7113     host_itspec->it_interval.tv_nsec =
7114                             tswapal(target_itspec->it_interval.tv_nsec);
7115     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7116     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7117 
7118     unlock_user_struct(target_itspec, target_addr, 1);
7119     return 0;
7120 }
7121 
7122 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7123                                                struct itimerspec *host_its)
7124 {
7125     struct target_itimerspec *target_itspec;
7126 
7127     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7128         return -TARGET_EFAULT;
7129     }
7130 
7131     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7132     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7133 
7134     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7135     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7136 
7137     unlock_user_struct(target_itspec, target_addr, 0);
7138     return 0;
7139 }
7140 
7141 static inline abi_long target_to_host_timex(struct timex *host_tx,
7142                                             abi_long target_addr)
7143 {
7144     struct target_timex *target_tx;
7145 
7146     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7147         return -TARGET_EFAULT;
7148     }
7149 
7150     __get_user(host_tx->modes, &target_tx->modes);
7151     __get_user(host_tx->offset, &target_tx->offset);
7152     __get_user(host_tx->freq, &target_tx->freq);
7153     __get_user(host_tx->maxerror, &target_tx->maxerror);
7154     __get_user(host_tx->esterror, &target_tx->esterror);
7155     __get_user(host_tx->status, &target_tx->status);
7156     __get_user(host_tx->constant, &target_tx->constant);
7157     __get_user(host_tx->precision, &target_tx->precision);
7158     __get_user(host_tx->tolerance, &target_tx->tolerance);
7159     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7160     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7161     __get_user(host_tx->tick, &target_tx->tick);
7162     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7163     __get_user(host_tx->jitter, &target_tx->jitter);
7164     __get_user(host_tx->shift, &target_tx->shift);
7165     __get_user(host_tx->stabil, &target_tx->stabil);
7166     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7167     __get_user(host_tx->calcnt, &target_tx->calcnt);
7168     __get_user(host_tx->errcnt, &target_tx->errcnt);
7169     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7170     __get_user(host_tx->tai, &target_tx->tai);
7171 
7172     unlock_user_struct(target_tx, target_addr, 0);
7173     return 0;
7174 }
7175 
7176 static inline abi_long host_to_target_timex(abi_long target_addr,
7177                                             struct timex *host_tx)
7178 {
7179     struct target_timex *target_tx;
7180 
7181     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7182         return -TARGET_EFAULT;
7183     }
7184 
7185     __put_user(host_tx->modes, &target_tx->modes);
7186     __put_user(host_tx->offset, &target_tx->offset);
7187     __put_user(host_tx->freq, &target_tx->freq);
7188     __put_user(host_tx->maxerror, &target_tx->maxerror);
7189     __put_user(host_tx->esterror, &target_tx->esterror);
7190     __put_user(host_tx->status, &target_tx->status);
7191     __put_user(host_tx->constant, &target_tx->constant);
7192     __put_user(host_tx->precision, &target_tx->precision);
7193     __put_user(host_tx->tolerance, &target_tx->tolerance);
7194     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7195     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7196     __put_user(host_tx->tick, &target_tx->tick);
7197     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7198     __put_user(host_tx->jitter, &target_tx->jitter);
7199     __put_user(host_tx->shift, &target_tx->shift);
7200     __put_user(host_tx->stabil, &target_tx->stabil);
7201     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7202     __put_user(host_tx->calcnt, &target_tx->calcnt);
7203     __put_user(host_tx->errcnt, &target_tx->errcnt);
7204     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7205     __put_user(host_tx->tai, &target_tx->tai);
7206 
7207     unlock_user_struct(target_tx, target_addr, 1);
7208     return 0;
7209 }
7210 
7211 
7212 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7213                                                abi_ulong target_addr)
7214 {
7215     struct target_sigevent *target_sevp;
7216 
7217     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7218         return -TARGET_EFAULT;
7219     }
7220 
7221     /* This union is awkward on 64 bit systems because it has a 32 bit
7222      * integer and a pointer in it; we follow the conversion approach
7223      * used for handling sigval types in signal.c so the guest should get
7224      * the correct value back even if we did a 64 bit byteswap and it's
7225      * using the 32 bit integer.
7226      */
7227     host_sevp->sigev_value.sival_ptr =
7228         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7229     host_sevp->sigev_signo =
7230         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7231     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7232     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7233 
7234     unlock_user_struct(target_sevp, target_addr, 1);
7235     return 0;
7236 }
7237 
7238 #if defined(TARGET_NR_mlockall)
7239 static inline int target_to_host_mlockall_arg(int arg)
7240 {
7241     int result = 0;
7242 
7243     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7244         result |= MCL_CURRENT;
7245     }
7246     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7247         result |= MCL_FUTURE;
7248     }
7249     return result;
7250 }
7251 #endif
7252 
7253 static inline abi_long host_to_target_stat64(void *cpu_env,
7254                                              abi_ulong target_addr,
7255                                              struct stat *host_st)
7256 {
7257 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7258     if (((CPUARMState *)cpu_env)->eabi) {
7259         struct target_eabi_stat64 *target_st;
7260 
7261         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7262             return -TARGET_EFAULT;
7263         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7264         __put_user(host_st->st_dev, &target_st->st_dev);
7265         __put_user(host_st->st_ino, &target_st->st_ino);
7266 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7267         __put_user(host_st->st_ino, &target_st->__st_ino);
7268 #endif
7269         __put_user(host_st->st_mode, &target_st->st_mode);
7270         __put_user(host_st->st_nlink, &target_st->st_nlink);
7271         __put_user(host_st->st_uid, &target_st->st_uid);
7272         __put_user(host_st->st_gid, &target_st->st_gid);
7273         __put_user(host_st->st_rdev, &target_st->st_rdev);
7274         __put_user(host_st->st_size, &target_st->st_size);
7275         __put_user(host_st->st_blksize, &target_st->st_blksize);
7276         __put_user(host_st->st_blocks, &target_st->st_blocks);
7277         __put_user(host_st->st_atime, &target_st->target_st_atime);
7278         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7279         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7280         unlock_user_struct(target_st, target_addr, 1);
7281     } else
7282 #endif
7283     {
7284 #if defined(TARGET_HAS_STRUCT_STAT64)
7285         struct target_stat64 *target_st;
7286 #else
7287         struct target_stat *target_st;
7288 #endif
7289 
7290         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7291             return -TARGET_EFAULT;
7292         memset(target_st, 0, sizeof(*target_st));
7293         __put_user(host_st->st_dev, &target_st->st_dev);
7294         __put_user(host_st->st_ino, &target_st->st_ino);
7295 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7296         __put_user(host_st->st_ino, &target_st->__st_ino);
7297 #endif
7298         __put_user(host_st->st_mode, &target_st->st_mode);
7299         __put_user(host_st->st_nlink, &target_st->st_nlink);
7300         __put_user(host_st->st_uid, &target_st->st_uid);
7301         __put_user(host_st->st_gid, &target_st->st_gid);
7302         __put_user(host_st->st_rdev, &target_st->st_rdev);
7303         /* XXX: better use of kernel struct */
7304         __put_user(host_st->st_size, &target_st->st_size);
7305         __put_user(host_st->st_blksize, &target_st->st_blksize);
7306         __put_user(host_st->st_blocks, &target_st->st_blocks);
7307         __put_user(host_st->st_atime, &target_st->target_st_atime);
7308         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7309         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7310         unlock_user_struct(target_st, target_addr, 1);
7311     }
7312 
7313     return 0;
7314 }
7315 
7316 /* ??? Using host futex calls even when target atomic operations
7317    are not really atomic probably breaks things.  However implementing
7318    futexes locally would make futexes shared between multiple processes
7319    tricky.  However they're probably useless because guest atomic
7320    operations won't work either.  */
7321 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7322                     target_ulong uaddr2, int val3)
7323 {
7324     struct timespec ts, *pts;
7325     int base_op;
7326 
7327     /* ??? We assume FUTEX_* constants are the same on both host
7328        and target.  */
7329 #ifdef FUTEX_CMD_MASK
7330     base_op = op & FUTEX_CMD_MASK;
7331 #else
7332     base_op = op;
7333 #endif
7334     switch (base_op) {
7335     case FUTEX_WAIT:
7336     case FUTEX_WAIT_BITSET:
7337         if (timeout) {
7338             pts = &ts;
7339             target_to_host_timespec(pts, timeout);
7340         } else {
7341             pts = NULL;
7342         }
7343         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7344                          pts, NULL, val3));
7345     case FUTEX_WAKE:
7346         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7347     case FUTEX_FD:
7348         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7349     case FUTEX_REQUEUE:
7350     case FUTEX_CMP_REQUEUE:
7351     case FUTEX_WAKE_OP:
7352         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7353            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7354            But the prototype takes a `struct timespec *'; insert casts
7355            to satisfy the compiler.  We do not need to tswap TIMEOUT
7356            since it's not compared to guest memory.  */
7357         pts = (struct timespec *)(uintptr_t) timeout;
7358         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7359                                     g2h(uaddr2),
7360                                     (base_op == FUTEX_CMP_REQUEUE
7361                                      ? tswap32(val3)
7362                                      : val3)));
7363     default:
7364         return -TARGET_ENOSYS;
7365     }
7366 }
7367 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7368 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7369                                      abi_long handle, abi_long mount_id,
7370                                      abi_long flags)
7371 {
7372     struct file_handle *target_fh;
7373     struct file_handle *fh;
7374     int mid = 0;
7375     abi_long ret;
7376     char *name;
7377     unsigned int size, total_size;
7378 
7379     if (get_user_s32(size, handle)) {
7380         return -TARGET_EFAULT;
7381     }
7382 
7383     name = lock_user_string(pathname);
7384     if (!name) {
7385         return -TARGET_EFAULT;
7386     }
7387 
7388     total_size = sizeof(struct file_handle) + size;
7389     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7390     if (!target_fh) {
7391         unlock_user(name, pathname, 0);
7392         return -TARGET_EFAULT;
7393     }
7394 
7395     fh = g_malloc0(total_size);
7396     fh->handle_bytes = size;
7397 
7398     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7399     unlock_user(name, pathname, 0);
7400 
7401     /* man name_to_handle_at(2):
7402      * Other than the use of the handle_bytes field, the caller should treat
7403      * the file_handle structure as an opaque data type
7404      */
7405 
7406     memcpy(target_fh, fh, total_size);
7407     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7408     target_fh->handle_type = tswap32(fh->handle_type);
7409     g_free(fh);
7410     unlock_user(target_fh, handle, total_size);
7411 
7412     if (put_user_s32(mid, mount_id)) {
7413         return -TARGET_EFAULT;
7414     }
7415 
7416     return ret;
7417 
7418 }
7419 #endif
7420 
7421 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7422 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7423                                      abi_long flags)
7424 {
7425     struct file_handle *target_fh;
7426     struct file_handle *fh;
7427     unsigned int size, total_size;
7428     abi_long ret;
7429 
7430     if (get_user_s32(size, handle)) {
7431         return -TARGET_EFAULT;
7432     }
7433 
7434     total_size = sizeof(struct file_handle) + size;
7435     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7436     if (!target_fh) {
7437         return -TARGET_EFAULT;
7438     }
7439 
7440     fh = g_memdup(target_fh, total_size);
7441     fh->handle_bytes = size;
7442     fh->handle_type = tswap32(target_fh->handle_type);
7443 
7444     ret = get_errno(open_by_handle_at(mount_fd, fh,
7445                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7446 
7447     g_free(fh);
7448 
7449     unlock_user(target_fh, handle, total_size);
7450 
7451     return ret;
7452 }
7453 #endif
7454 
7455 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7456 
7457 /* signalfd siginfo conversion */
7458 
7459 static void
7460 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7461                                 const struct signalfd_siginfo *info)
7462 {
7463     int sig = host_to_target_signal(info->ssi_signo);
7464 
7465     /* linux/signalfd.h defines a ssi_addr_lsb
7466      * not defined in sys/signalfd.h but used by some kernels
7467      */
7468 
7469 #ifdef BUS_MCEERR_AO
7470     if (tinfo->ssi_signo == SIGBUS &&
7471         (tinfo->ssi_code == BUS_MCEERR_AR ||
7472          tinfo->ssi_code == BUS_MCEERR_AO)) {
7473         uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7474         uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7475         *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7476     }
7477 #endif
7478 
7479     tinfo->ssi_signo = tswap32(sig);
7480     tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7481     tinfo->ssi_code = tswap32(info->ssi_code);
7482     tinfo->ssi_pid = tswap32(info->ssi_pid);
7483     tinfo->ssi_uid = tswap32(info->ssi_uid);
7484     tinfo->ssi_fd = tswap32(info->ssi_fd);
7485     tinfo->ssi_tid = tswap32(info->ssi_tid);
7486     tinfo->ssi_band = tswap32(info->ssi_band);
7487     tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7488     tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7489     tinfo->ssi_status = tswap32(info->ssi_status);
7490     tinfo->ssi_int = tswap32(info->ssi_int);
7491     tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7492     tinfo->ssi_utime = tswap64(info->ssi_utime);
7493     tinfo->ssi_stime = tswap64(info->ssi_stime);
7494     tinfo->ssi_addr = tswap64(info->ssi_addr);
7495 }
7496 
7497 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7498 {
7499     int i;
7500 
7501     for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7502         host_to_target_signalfd_siginfo(buf + i, buf + i);
7503     }
7504 
7505     return len;
7506 }
7507 
7508 static TargetFdTrans target_signalfd_trans = {
7509     .host_to_target_data = host_to_target_data_signalfd,
7510 };
7511 
7512 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7513 {
7514     int host_flags;
7515     target_sigset_t *target_mask;
7516     sigset_t host_mask;
7517     abi_long ret;
7518 
7519     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7520         return -TARGET_EINVAL;
7521     }
7522     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7523         return -TARGET_EFAULT;
7524     }
7525 
7526     target_to_host_sigset(&host_mask, target_mask);
7527 
7528     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7529 
7530     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7531     if (ret >= 0) {
7532         fd_trans_register(ret, &target_signalfd_trans);
7533     }
7534 
7535     unlock_user_struct(target_mask, mask, 0);
7536 
7537     return ret;
7538 }
7539 #endif
7540 
7541 /* Map host to target signal numbers for the wait family of syscalls.
7542    Assume all other status bits are the same.  */
7543 int host_to_target_waitstatus(int status)
7544 {
7545     if (WIFSIGNALED(status)) {
7546         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7547     }
7548     if (WIFSTOPPED(status)) {
7549         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7550                | (status & 0xff);
7551     }
7552     return status;
7553 }
7554 
7555 static int open_self_cmdline(void *cpu_env, int fd)
7556 {
7557     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7558     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7559     int i;
7560 
7561     for (i = 0; i < bprm->argc; i++) {
7562         size_t len = strlen(bprm->argv[i]) + 1;
7563 
7564         if (write(fd, bprm->argv[i], len) != len) {
7565             return -1;
7566         }
7567     }
7568 
7569     return 0;
7570 }
7571 
7572 static int open_self_maps(void *cpu_env, int fd)
7573 {
7574     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7575     TaskState *ts = cpu->opaque;
7576     FILE *fp;
7577     char *line = NULL;
7578     size_t len = 0;
7579     ssize_t read;
7580 
7581     fp = fopen("/proc/self/maps", "r");
7582     if (fp == NULL) {
7583         return -1;
7584     }
7585 
7586     while ((read = getline(&line, &len, fp)) != -1) {
7587         int fields, dev_maj, dev_min, inode;
7588         uint64_t min, max, offset;
7589         char flag_r, flag_w, flag_x, flag_p;
7590         char path[512] = "";
7591         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7592                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7593                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7594 
7595         if ((fields < 10) || (fields > 11)) {
7596             continue;
7597         }
7598         if (h2g_valid(min)) {
7599             int flags = page_get_flags(h2g(min));
7600             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7601             if (page_check_range(h2g(min), max - min, flags) == -1) {
7602                 continue;
7603             }
7604             if (h2g(min) == ts->info->stack_limit) {
7605                 pstrcpy(path, sizeof(path), "      [stack]");
7606             }
7607             dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7608                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7609                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7610                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7611                     path[0] ? "         " : "", path);
7612         }
7613     }
7614 
7615     free(line);
7616     fclose(fp);
7617 
7618     return 0;
7619 }
7620 
7621 static int open_self_stat(void *cpu_env, int fd)
7622 {
7623     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7624     TaskState *ts = cpu->opaque;
7625     abi_ulong start_stack = ts->info->start_stack;
7626     int i;
7627 
7628     for (i = 0; i < 44; i++) {
7629       char buf[128];
7630       int len;
7631       uint64_t val = 0;
7632 
7633       if (i == 0) {
7634         /* pid */
7635         val = getpid();
7636         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7637       } else if (i == 1) {
7638         /* app name */
7639         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7640       } else if (i == 27) {
7641         /* stack bottom */
7642         val = start_stack;
7643         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7644       } else {
7645         /* for the rest, there is MasterCard */
7646         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7647       }
7648 
7649       len = strlen(buf);
7650       if (write(fd, buf, len) != len) {
7651           return -1;
7652       }
7653     }
7654 
7655     return 0;
7656 }
7657 
7658 static int open_self_auxv(void *cpu_env, int fd)
7659 {
7660     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7661     TaskState *ts = cpu->opaque;
7662     abi_ulong auxv = ts->info->saved_auxv;
7663     abi_ulong len = ts->info->auxv_len;
7664     char *ptr;
7665 
7666     /*
7667      * Auxiliary vector is stored in target process stack.
7668      * read in whole auxv vector and copy it to file
7669      */
7670     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7671     if (ptr != NULL) {
7672         while (len > 0) {
7673             ssize_t r;
7674             r = write(fd, ptr, len);
7675             if (r <= 0) {
7676                 break;
7677             }
7678             len -= r;
7679             ptr += r;
7680         }
7681         lseek(fd, 0, SEEK_SET);
7682         unlock_user(ptr, auxv, len);
7683     }
7684 
7685     return 0;
7686 }
7687 
7688 static int is_proc_myself(const char *filename, const char *entry)
7689 {
7690     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7691         filename += strlen("/proc/");
7692         if (!strncmp(filename, "self/", strlen("self/"))) {
7693             filename += strlen("self/");
7694         } else if (*filename >= '1' && *filename <= '9') {
7695             char myself[80];
7696             snprintf(myself, sizeof(myself), "%d/", getpid());
7697             if (!strncmp(filename, myself, strlen(myself))) {
7698                 filename += strlen(myself);
7699             } else {
7700                 return 0;
7701             }
7702         } else {
7703             return 0;
7704         }
7705         if (!strcmp(filename, entry)) {
7706             return 1;
7707         }
7708     }
7709     return 0;
7710 }
7711 
7712 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7713 static int is_proc(const char *filename, const char *entry)
7714 {
7715     return strcmp(filename, entry) == 0;
7716 }
7717 
7718 static int open_net_route(void *cpu_env, int fd)
7719 {
7720     FILE *fp;
7721     char *line = NULL;
7722     size_t len = 0;
7723     ssize_t read;
7724 
7725     fp = fopen("/proc/net/route", "r");
7726     if (fp == NULL) {
7727         return -1;
7728     }
7729 
7730     /* read header */
7731 
7732     read = getline(&line, &len, fp);
7733     dprintf(fd, "%s", line);
7734 
7735     /* read routes */
7736 
7737     while ((read = getline(&line, &len, fp)) != -1) {
7738         char iface[16];
7739         uint32_t dest, gw, mask;
7740         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7741         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7742                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7743                      &mask, &mtu, &window, &irtt);
7744         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7745                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7746                 metric, tswap32(mask), mtu, window, irtt);
7747     }
7748 
7749     free(line);
7750     fclose(fp);
7751 
7752     return 0;
7753 }
7754 #endif
7755 
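     /*
      * Intercept opens of a few /proc files whose contents must reflect the
      * emulated guest rather than the QEMU host process.  "/proc/self/exe" is
      * redirected to the guest binary (the AT_EXECFD descriptor if the loader
      * kept one open, otherwise exec_path is reopened).  The entries listed in
      * 'fakes' below are generated on the fly by their fill() callback into an
      * unlinked temporary file, whose descriptor is returned to the guest.
      * Anything else is passed straight through to safe_openat().
      */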
7756 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7757 {
7758     struct fake_open {
7759         const char *filename;
7760         int (*fill)(void *cpu_env, int fd);
7761         int (*cmp)(const char *s1, const char *s2);
7762     };
7763     const struct fake_open *fake_open;
7764     static const struct fake_open fakes[] = {
7765         { "maps", open_self_maps, is_proc_myself },
7766         { "stat", open_self_stat, is_proc_myself },
7767         { "auxv", open_self_auxv, is_proc_myself },
7768         { "cmdline", open_self_cmdline, is_proc_myself },
7769 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7770         { "/proc/net/route", open_net_route, is_proc },
7771 #endif
7772         { NULL, NULL, NULL }
7773     };
7774 
7775     if (is_proc_myself(pathname, "exe")) {
7776         int execfd = qemu_getauxval(AT_EXECFD);
7777         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7778     }
7779 
7780     for (fake_open = fakes; fake_open->filename; fake_open++) {
7781         if (fake_open->cmp(pathname, fake_open->filename)) {
7782             break;
7783         }
7784     }
7785 
7786     if (fake_open->filename) {
7787         const char *tmpdir;
7788         char filename[PATH_MAX];
7789         int fd, r;
7790 
7791         /* create a temporary file to hold the synthesized /proc contents */
7792         tmpdir = getenv("TMPDIR");
7793         if (!tmpdir)
7794             tmpdir = "/tmp";
7795         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7796         fd = mkstemp(filename);
7797         if (fd < 0) {
7798             return fd;
7799         }
7800         unlink(filename);
7801 
7802         if ((r = fake_open->fill(cpu_env, fd))) {
7803             int e = errno;
7804             close(fd);
7805             errno = e;
7806             return r;
7807         }
7808         lseek(fd, 0, SEEK_SET);
7809 
7810         return fd;
7811     }
7812 
7813     return safe_openat(dirfd, path(pathname), flags, mode);
7814 }
7815 
7816 #define TIMER_MAGIC 0x0caf0000
7817 #define TIMER_MAGIC_MASK 0xffff0000
7818 
7819 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
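     /*
      * Guest-visible timer IDs carry the TIMER_MAGIC tag in their top 16 bits
      * and the g_posix_timers[] index in their low 16 bits.  For example, an
      * ID of 0x0caf0003 refers to g_posix_timers[3]; any value without the
      * magic tag, or with an index beyond the table, yields -TARGET_EINVAL.
      */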
7820 static target_timer_t get_timer_id(abi_long arg)
7821 {
7822     target_timer_t timerid = arg;
7823 
7824     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7825         return -TARGET_EINVAL;
7826     }
7827 
7828     timerid &= 0xffff;
7829 
7830     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7831         return -TARGET_EINVAL;
7832     }
7833 
7834     return timerid;
7835 }
7836 
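     /*
      * eventfd payloads are single 8-byte counters in host byte order, so the
      * same byte-swapping routine serves both directions of the fd translator
      * below; buffers shorter than 8 bytes are rejected with -EINVAL.
      */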
7837 static abi_long swap_data_eventfd(void *buf, size_t len)
7838 {
7839     uint64_t *counter = buf;
7840     int i;
7841 
7842     if (len < sizeof(uint64_t)) {
7843         return -EINVAL;
7844     }
7845 
7846     for (i = 0; i < len; i += sizeof(uint64_t)) {
7847         *counter = tswap64(*counter);
7848         counter++;
7849     }
7850 
7851     return len;
7852 }
7853 
7854 static TargetFdTrans target_eventfd_trans = {
7855     .host_to_target_data = swap_data_eventfd,
7856     .target_to_host_data = swap_data_eventfd,
7857 };
7858 
7859 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7860     (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7861      defined(__NR_inotify_init1))
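     /*
      * An inotify read() returns a packed sequence of struct inotify_event
      * records, each followed by ev->len bytes of (optional) name.  Byte-swap
      * the wd, mask, cookie and len fields of every record in place for the
      * guest; the name bytes themselves need no conversion.
      */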
7862 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7863 {
7864     struct inotify_event *ev;
7865     int i;
7866     uint32_t name_len;
7867 
7868     for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7869         ev = (struct inotify_event *)((char *)buf + i);
7870         name_len = ev->len;
7871 
7872         ev->wd = tswap32(ev->wd);
7873         ev->mask = tswap32(ev->mask);
7874         ev->cookie = tswap32(ev->cookie);
7875         ev->len = tswap32(name_len);
7876     }
7877 
7878     return len;
7879 }
7880 
7881 static TargetFdTrans target_inotify_trans = {
7882     .host_to_target_data = host_to_target_data_inotify,
7883 };
7884 #endif
7885 
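     /*
      * sched_{get,set}affinity masks are copied bit by bit because the guest's
      * abi_ulong words and the host's unsigned long words may differ in width
      * and byte order (e.g. a 32-bit guest on a 64-bit host packs two target
      * words into each host word).  host_to_target_cpu_mask() below performs
      * the mirror-image conversion.
      */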
7886 static int target_to_host_cpu_mask(unsigned long *host_mask,
7887                                    size_t host_size,
7888                                    abi_ulong target_addr,
7889                                    size_t target_size)
7890 {
7891     unsigned target_bits = sizeof(abi_ulong) * 8;
7892     unsigned host_bits = sizeof(*host_mask) * 8;
7893     abi_ulong *target_mask;
7894     unsigned i, j;
7895 
7896     assert(host_size >= target_size);
7897 
7898     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7899     if (!target_mask) {
7900         return -TARGET_EFAULT;
7901     }
7902     memset(host_mask, 0, host_size);
7903 
7904     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7905         unsigned bit = i * target_bits;
7906         abi_ulong val;
7907 
7908         __get_user(val, &target_mask[i]);
7909         for (j = 0; j < target_bits; j++, bit++) {
7910             if (val & (1UL << j)) {
7911                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7912             }
7913         }
7914     }
7915 
7916     unlock_user(target_mask, target_addr, 0);
7917     return 0;
7918 }
7919 
7920 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7921                                    size_t host_size,
7922                                    abi_ulong target_addr,
7923                                    size_t target_size)
7924 {
7925     unsigned target_bits = sizeof(abi_ulong) * 8;
7926     unsigned host_bits = sizeof(*host_mask) * 8;
7927     abi_ulong *target_mask;
7928     unsigned i, j;
7929 
7930     assert(host_size >= target_size);
7931 
7932     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7933     if (!target_mask) {
7934         return -TARGET_EFAULT;
7935     }
7936 
7937     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7938         unsigned bit = i * target_bits;
7939         abi_ulong val = 0;
7940 
7941         for (j = 0; j < target_bits; j++, bit++) {
7942             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7943                 val |= 1UL << j;
7944             }
7945         }
7946         __put_user(val, &target_mask[i]);
7947     }
7948 
7949     unlock_user(target_mask, target_addr, target_size);
7950     return 0;
7951 }
7952 
7953 /* do_syscall() should always have a single exit point at the end so
7954    that actions, such as logging of syscall results, can be performed.
7955    All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7956 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7957                     abi_long arg2, abi_long arg3, abi_long arg4,
7958                     abi_long arg5, abi_long arg6, abi_long arg7,
7959                     abi_long arg8)
7960 {
7961     CPUState *cpu = ENV_GET_CPU(cpu_env);
7962     abi_long ret;
7963     struct stat st;
7964     struct statfs stfs;
7965     void *p;
7966 
7967 #if defined(DEBUG_ERESTARTSYS)
7968     /* Debug-only code for exercising the syscall-restart code paths
7969      * in the per-architecture cpu main loops: restart every syscall
7970      * the guest makes once before letting it through.
7971      */
7972     {
7973         static int flag;
7974 
7975         flag = !flag;
7976         if (flag) {
7977             return -TARGET_ERESTARTSYS;
7978         }
7979     }
7980 #endif
7981 
7982 #ifdef DEBUG
7983     gemu_log("syscall %d", num);
7984 #endif
7985     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7986     if(do_strace)
7987         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7988 
7989     switch(num) {
7990     case TARGET_NR_exit:
7991         /* In old applications this may be used to implement _exit(2).
7992            However in threaded applications it is used for thread termination,
7993            and _exit_group is used for application termination.
7994            Do thread termination if we have more than one thread.  */
7995 
7996         if (block_signals()) {
7997             ret = -TARGET_ERESTARTSYS;
7998             break;
7999         }
8000 
8001         cpu_list_lock();
8002 
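             /* More than one CPU means other guest threads are still running:
              * tear down only this thread.  Zero the guest's child_tidptr word
              * and wake any futex waiters so a guest pthread_join() can
              * complete, then exit just this host thread. */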
8003         if (CPU_NEXT(first_cpu)) {
8004             TaskState *ts;
8005 
8006             /* Remove the CPU from the list.  */
8007             QTAILQ_REMOVE(&cpus, cpu, node);
8008 
8009             cpu_list_unlock();
8010 
8011             ts = cpu->opaque;
8012             if (ts->child_tidptr) {
8013                 put_user_u32(0, ts->child_tidptr);
8014                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8015                           NULL, NULL, 0);
8016             }
8017             thread_cpu = NULL;
8018             object_unref(OBJECT(cpu));
8019             g_free(ts);
8020             rcu_unregister_thread();
8021             pthread_exit(NULL);
8022         }
8023 
8024         cpu_list_unlock();
8025         preexit_cleanup(cpu_env, arg1);
8026         _exit(arg1);
8027         ret = 0; /* avoid warning */
8028         break;
8029     case TARGET_NR_read:
8030         if (arg3 == 0)
8031             ret = 0;
8032         else {
8033             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8034                 goto efault;
8035             ret = get_errno(safe_read(arg1, p, arg3));
8036             if (ret >= 0 &&
8037                 fd_trans_host_to_target_data(arg1)) {
8038                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8039             }
8040             unlock_user(p, arg2, ret);
8041         }
8042         break;
8043     case TARGET_NR_write:
8044         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8045             goto efault;
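             /* A target_to_host_data translator rewrites the buffer in place
              * (e.g. byte-swapping an eventfd counter), so work on a copy and
              * leave the guest's own memory untouched by this write. */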
8046         if (fd_trans_target_to_host_data(arg1)) {
8047             void *copy = g_malloc(arg3);
8048             memcpy(copy, p, arg3);
8049             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8050             if (ret >= 0) {
8051                 ret = get_errno(safe_write(arg1, copy, ret));
8052             }
8053             g_free(copy);
8054         } else {
8055             ret = get_errno(safe_write(arg1, p, arg3));
8056         }
8057         unlock_user(p, arg2, 0);
8058         break;
8059 #ifdef TARGET_NR_open
8060     case TARGET_NR_open:
8061         if (!(p = lock_user_string(arg1)))
8062             goto efault;
8063         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8064                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8065                                   arg3));
8066         fd_trans_unregister(ret);
8067         unlock_user(p, arg1, 0);
8068         break;
8069 #endif
8070     case TARGET_NR_openat:
8071         if (!(p = lock_user_string(arg2)))
8072             goto efault;
8073         ret = get_errno(do_openat(cpu_env, arg1, p,
8074                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8075                                   arg4));
8076         fd_trans_unregister(ret);
8077         unlock_user(p, arg2, 0);
8078         break;
8079 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8080     case TARGET_NR_name_to_handle_at:
8081         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8082         break;
8083 #endif
8084 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8085     case TARGET_NR_open_by_handle_at:
8086         ret = do_open_by_handle_at(arg1, arg2, arg3);
8087         fd_trans_unregister(ret);
8088         break;
8089 #endif
8090     case TARGET_NR_close:
8091         fd_trans_unregister(arg1);
8092         ret = get_errno(close(arg1));
8093         break;
8094     case TARGET_NR_brk:
8095         ret = do_brk(arg1);
8096         break;
8097 #ifdef TARGET_NR_fork
8098     case TARGET_NR_fork:
8099         ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8100         break;
8101 #endif
8102 #ifdef TARGET_NR_waitpid
8103     case TARGET_NR_waitpid:
8104         {
8105             int status;
8106             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8107             if (!is_error(ret) && arg2 && ret
8108                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8109                 goto efault;
8110         }
8111         break;
8112 #endif
8113 #ifdef TARGET_NR_waitid
8114     case TARGET_NR_waitid:
8115         {
8116             siginfo_t info;
8117             info.si_pid = 0;
8118             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8119             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8120                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8121                     goto efault;
8122                 host_to_target_siginfo(p, &info);
8123                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8124             }
8125         }
8126         break;
8127 #endif
8128 #ifdef TARGET_NR_creat /* not on alpha */
8129     case TARGET_NR_creat:
8130         if (!(p = lock_user_string(arg1)))
8131             goto efault;
8132         ret = get_errno(creat(p, arg2));
8133         fd_trans_unregister(ret);
8134         unlock_user(p, arg1, 0);
8135         break;
8136 #endif
8137 #ifdef TARGET_NR_link
8138     case TARGET_NR_link:
8139         {
8140             void * p2;
8141             p = lock_user_string(arg1);
8142             p2 = lock_user_string(arg2);
8143             if (!p || !p2)
8144                 ret = -TARGET_EFAULT;
8145             else
8146                 ret = get_errno(link(p, p2));
8147             unlock_user(p2, arg2, 0);
8148             unlock_user(p, arg1, 0);
8149         }
8150         break;
8151 #endif
8152 #if defined(TARGET_NR_linkat)
8153     case TARGET_NR_linkat:
8154         {
8155             void * p2 = NULL;
8156             if (!arg2 || !arg4)
8157                 goto efault;
8158             p  = lock_user_string(arg2);
8159             p2 = lock_user_string(arg4);
8160             if (!p || !p2)
8161                 ret = -TARGET_EFAULT;
8162             else
8163                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8164             unlock_user(p, arg2, 0);
8165             unlock_user(p2, arg4, 0);
8166         }
8167         break;
8168 #endif
8169 #ifdef TARGET_NR_unlink
8170     case TARGET_NR_unlink:
8171         if (!(p = lock_user_string(arg1)))
8172             goto efault;
8173         ret = get_errno(unlink(p));
8174         unlock_user(p, arg1, 0);
8175         break;
8176 #endif
8177 #if defined(TARGET_NR_unlinkat)
8178     case TARGET_NR_unlinkat:
8179         if (!(p = lock_user_string(arg2)))
8180             goto efault;
8181         ret = get_errno(unlinkat(arg1, p, arg3));
8182         unlock_user(p, arg2, 0);
8183         break;
8184 #endif
8185     case TARGET_NR_execve:
8186         {
8187             char **argp, **envp;
8188             int argc, envc;
8189             abi_ulong gp;
8190             abi_ulong guest_argp;
8191             abi_ulong guest_envp;
8192             abi_ulong addr;
8193             char **q;
8194             int total_size = 0;
8195 
8196             argc = 0;
8197             guest_argp = arg2;
8198             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8199                 if (get_user_ual(addr, gp))
8200                     goto efault;
8201                 if (!addr)
8202                     break;
8203                 argc++;
8204             }
8205             envc = 0;
8206             guest_envp = arg3;
8207             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8208                 if (get_user_ual(addr, gp))
8209                     goto efault;
8210                 if (!addr)
8211                     break;
8212                 envc++;
8213             }
8214 
8215             argp = g_new0(char *, argc + 1);
8216             envp = g_new0(char *, envc + 1);
8217 
8218             for (gp = guest_argp, q = argp; gp;
8219                   gp += sizeof(abi_ulong), q++) {
8220                 if (get_user_ual(addr, gp))
8221                     goto execve_efault;
8222                 if (!addr)
8223                     break;
8224                 if (!(*q = lock_user_string(addr)))
8225                     goto execve_efault;
8226                 total_size += strlen(*q) + 1;
8227             }
8228             *q = NULL;
8229 
8230             for (gp = guest_envp, q = envp; gp;
8231                   gp += sizeof(abi_ulong), q++) {
8232                 if (get_user_ual(addr, gp))
8233                     goto execve_efault;
8234                 if (!addr)
8235                     break;
8236                 if (!(*q = lock_user_string(addr)))
8237                     goto execve_efault;
8238                 total_size += strlen(*q) + 1;
8239             }
8240             *q = NULL;
8241 
8242             if (!(p = lock_user_string(arg1)))
8243                 goto execve_efault;
8244             /* Although execve() is not an interruptible syscall it is
8245              * a special case where we must use the safe_syscall wrapper:
8246              * if we allow a signal to happen before we make the host
8247              * syscall then we will 'lose' it, because at the point of
8248              * execve the process leaves QEMU's control. So we use the
8249              * safe syscall wrapper to ensure that we either take the
8250              * signal as a guest signal, or else it does not happen
8251              * before the execve completes and makes it the other
8252              * program's problem.
8253              */
8254             ret = get_errno(safe_execve(p, argp, envp));
8255             unlock_user(p, arg1, 0);
8256 
8257             goto execve_end;
8258 
8259         execve_efault:
8260             ret = -TARGET_EFAULT;
8261 
8262         execve_end:
8263             for (gp = guest_argp, q = argp; *q;
8264                   gp += sizeof(abi_ulong), q++) {
8265                 if (get_user_ual(addr, gp)
8266                     || !addr)
8267                     break;
8268                 unlock_user(*q, addr, 0);
8269             }
8270             for (gp = guest_envp, q = envp; *q;
8271                   gp += sizeof(abi_ulong), q++) {
8272                 if (get_user_ual(addr, gp)
8273                     || !addr)
8274                     break;
8275                 unlock_user(*q, addr, 0);
8276             }
8277 
8278             g_free(argp);
8279             g_free(envp);
8280         }
8281         break;
8282     case TARGET_NR_chdir:
8283         if (!(p = lock_user_string(arg1)))
8284             goto efault;
8285         ret = get_errno(chdir(p));
8286         unlock_user(p, arg1, 0);
8287         break;
8288 #ifdef TARGET_NR_time
8289     case TARGET_NR_time:
8290         {
8291             time_t host_time;
8292             ret = get_errno(time(&host_time));
8293             if (!is_error(ret)
8294                 && arg1
8295                 && put_user_sal(host_time, arg1))
8296                 goto efault;
8297         }
8298         break;
8299 #endif
8300 #ifdef TARGET_NR_mknod
8301     case TARGET_NR_mknod:
8302         if (!(p = lock_user_string(arg1)))
8303             goto efault;
8304         ret = get_errno(mknod(p, arg2, arg3));
8305         unlock_user(p, arg1, 0);
8306         break;
8307 #endif
8308 #if defined(TARGET_NR_mknodat)
8309     case TARGET_NR_mknodat:
8310         if (!(p = lock_user_string(arg2)))
8311             goto efault;
8312         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8313         unlock_user(p, arg2, 0);
8314         break;
8315 #endif
8316 #ifdef TARGET_NR_chmod
8317     case TARGET_NR_chmod:
8318         if (!(p = lock_user_string(arg1)))
8319             goto efault;
8320         ret = get_errno(chmod(p, arg2));
8321         unlock_user(p, arg1, 0);
8322         break;
8323 #endif
8324 #ifdef TARGET_NR_break
8325     case TARGET_NR_break:
8326         goto unimplemented;
8327 #endif
8328 #ifdef TARGET_NR_oldstat
8329     case TARGET_NR_oldstat:
8330         goto unimplemented;
8331 #endif
8332     case TARGET_NR_lseek:
8333         ret = get_errno(lseek(arg1, arg2, arg3));
8334         break;
8335 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8336     /* Alpha specific */
8337     case TARGET_NR_getxpid:
8338         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8339         ret = get_errno(getpid());
8340         break;
8341 #endif
8342 #ifdef TARGET_NR_getpid
8343     case TARGET_NR_getpid:
8344         ret = get_errno(getpid());
8345         break;
8346 #endif
8347     case TARGET_NR_mount:
8348         {
8349             /* need to look at the data field */
8350             void *p2, *p3;
8351 
8352             if (arg1) {
8353                 p = lock_user_string(arg1);
8354                 if (!p) {
8355                     goto efault;
8356                 }
8357             } else {
8358                 p = NULL;
8359             }
8360 
8361             p2 = lock_user_string(arg2);
8362             if (!p2) {
8363                 if (arg1) {
8364                     unlock_user(p, arg1, 0);
8365                 }
8366                 goto efault;
8367             }
8368 
8369             if (arg3) {
8370                 p3 = lock_user_string(arg3);
8371                 if (!p3) {
8372                     if (arg1) {
8373                         unlock_user(p, arg1, 0);
8374                     }
8375                     unlock_user(p2, arg2, 0);
8376                     goto efault;
8377                 }
8378             } else {
8379                 p3 = NULL;
8380             }
8381 
8382             /* FIXME - arg5 should be locked, but it isn't clear how to
8383              * do that since it's not guaranteed to be a NULL-terminated
8384              * string.
8385              */
8386             if (!arg5) {
8387                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8388             } else {
8389                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8390             }
8391             ret = get_errno(ret);
8392 
8393             if (arg1) {
8394                 unlock_user(p, arg1, 0);
8395             }
8396             unlock_user(p2, arg2, 0);
8397             if (arg3) {
8398                 unlock_user(p3, arg3, 0);
8399             }
8400         }
8401         break;
8402 #ifdef TARGET_NR_umount
8403     case TARGET_NR_umount:
8404         if (!(p = lock_user_string(arg1)))
8405             goto efault;
8406         ret = get_errno(umount(p));
8407         unlock_user(p, arg1, 0);
8408         break;
8409 #endif
8410 #ifdef TARGET_NR_stime /* not on alpha */
8411     case TARGET_NR_stime:
8412         {
8413             time_t host_time;
8414             if (get_user_sal(host_time, arg1))
8415                 goto efault;
8416             ret = get_errno(stime(&host_time));
8417         }
8418         break;
8419 #endif
8420     case TARGET_NR_ptrace:
8421         goto unimplemented;
8422 #ifdef TARGET_NR_alarm /* not on alpha */
8423     case TARGET_NR_alarm:
8424         ret = alarm(arg1);
8425         break;
8426 #endif
8427 #ifdef TARGET_NR_oldfstat
8428     case TARGET_NR_oldfstat:
8429         goto unimplemented;
8430 #endif
8431 #ifdef TARGET_NR_pause /* not on alpha */
8432     case TARGET_NR_pause:
8433         if (!block_signals()) {
8434             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8435         }
8436         ret = -TARGET_EINTR;
8437         break;
8438 #endif
8439 #ifdef TARGET_NR_utime
8440     case TARGET_NR_utime:
8441         {
8442             struct utimbuf tbuf, *host_tbuf;
8443             struct target_utimbuf *target_tbuf;
8444             if (arg2) {
8445                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8446                     goto efault;
8447                 tbuf.actime = tswapal(target_tbuf->actime);
8448                 tbuf.modtime = tswapal(target_tbuf->modtime);
8449                 unlock_user_struct(target_tbuf, arg2, 0);
8450                 host_tbuf = &tbuf;
8451             } else {
8452                 host_tbuf = NULL;
8453             }
8454             if (!(p = lock_user_string(arg1)))
8455                 goto efault;
8456             ret = get_errno(utime(p, host_tbuf));
8457             unlock_user(p, arg1, 0);
8458         }
8459         break;
8460 #endif
8461 #ifdef TARGET_NR_utimes
8462     case TARGET_NR_utimes:
8463         {
8464             struct timeval *tvp, tv[2];
8465             if (arg2) {
8466                 if (copy_from_user_timeval(&tv[0], arg2)
8467                     || copy_from_user_timeval(&tv[1],
8468                                               arg2 + sizeof(struct target_timeval)))
8469                     goto efault;
8470                 tvp = tv;
8471             } else {
8472                 tvp = NULL;
8473             }
8474             if (!(p = lock_user_string(arg1)))
8475                 goto efault;
8476             ret = get_errno(utimes(p, tvp));
8477             unlock_user(p, arg1, 0);
8478         }
8479         break;
8480 #endif
8481 #if defined(TARGET_NR_futimesat)
8482     case TARGET_NR_futimesat:
8483         {
8484             struct timeval *tvp, tv[2];
8485             if (arg3) {
8486                 if (copy_from_user_timeval(&tv[0], arg3)
8487                     || copy_from_user_timeval(&tv[1],
8488                                               arg3 + sizeof(struct target_timeval)))
8489                     goto efault;
8490                 tvp = tv;
8491             } else {
8492                 tvp = NULL;
8493             }
8494             if (!(p = lock_user_string(arg2)))
8495                 goto efault;
8496             ret = get_errno(futimesat(arg1, path(p), tvp));
8497             unlock_user(p, arg2, 0);
8498         }
8499         break;
8500 #endif
8501 #ifdef TARGET_NR_stty
8502     case TARGET_NR_stty:
8503         goto unimplemented;
8504 #endif
8505 #ifdef TARGET_NR_gtty
8506     case TARGET_NR_gtty:
8507         goto unimplemented;
8508 #endif
8509 #ifdef TARGET_NR_access
8510     case TARGET_NR_access:
8511         if (!(p = lock_user_string(arg1)))
8512             goto efault;
8513         ret = get_errno(access(path(p), arg2));
8514         unlock_user(p, arg1, 0);
8515         break;
8516 #endif
8517 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8518     case TARGET_NR_faccessat:
8519         if (!(p = lock_user_string(arg2)))
8520             goto efault;
8521         ret = get_errno(faccessat(arg1, p, arg3, 0));
8522         unlock_user(p, arg2, 0);
8523         break;
8524 #endif
8525 #ifdef TARGET_NR_nice /* not on alpha */
8526     case TARGET_NR_nice:
8527         ret = get_errno(nice(arg1));
8528         break;
8529 #endif
8530 #ifdef TARGET_NR_ftime
8531     case TARGET_NR_ftime:
8532         goto unimplemented;
8533 #endif
8534     case TARGET_NR_sync:
8535         sync();
8536         ret = 0;
8537         break;
8538 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8539     case TARGET_NR_syncfs:
8540         ret = get_errno(syncfs(arg1));
8541         break;
8542 #endif
8543     case TARGET_NR_kill:
8544         ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8545         break;
8546 #ifdef TARGET_NR_rename
8547     case TARGET_NR_rename:
8548         {
8549             void *p2;
8550             p = lock_user_string(arg1);
8551             p2 = lock_user_string(arg2);
8552             if (!p || !p2)
8553                 ret = -TARGET_EFAULT;
8554             else
8555                 ret = get_errno(rename(p, p2));
8556             unlock_user(p2, arg2, 0);
8557             unlock_user(p, arg1, 0);
8558         }
8559         break;
8560 #endif
8561 #if defined(TARGET_NR_renameat)
8562     case TARGET_NR_renameat:
8563         {
8564             void *p2;
8565             p  = lock_user_string(arg2);
8566             p2 = lock_user_string(arg4);
8567             if (!p || !p2)
8568                 ret = -TARGET_EFAULT;
8569             else
8570                 ret = get_errno(renameat(arg1, p, arg3, p2));
8571             unlock_user(p2, arg4, 0);
8572             unlock_user(p, arg2, 0);
8573         }
8574         break;
8575 #endif
8576 #if defined(TARGET_NR_renameat2)
8577     case TARGET_NR_renameat2:
8578         {
8579             void *p2;
8580             p  = lock_user_string(arg2);
8581             p2 = lock_user_string(arg4);
8582             if (!p || !p2) {
8583                 ret = -TARGET_EFAULT;
8584             } else {
8585                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8586             }
8587             unlock_user(p2, arg4, 0);
8588             unlock_user(p, arg2, 0);
8589         }
8590         break;
8591 #endif
8592 #ifdef TARGET_NR_mkdir
8593     case TARGET_NR_mkdir:
8594         if (!(p = lock_user_string(arg1)))
8595             goto efault;
8596         ret = get_errno(mkdir(p, arg2));
8597         unlock_user(p, arg1, 0);
8598         break;
8599 #endif
8600 #if defined(TARGET_NR_mkdirat)
8601     case TARGET_NR_mkdirat:
8602         if (!(p = lock_user_string(arg2)))
8603             goto efault;
8604         ret = get_errno(mkdirat(arg1, p, arg3));
8605         unlock_user(p, arg2, 0);
8606         break;
8607 #endif
8608 #ifdef TARGET_NR_rmdir
8609     case TARGET_NR_rmdir:
8610         if (!(p = lock_user_string(arg1)))
8611             goto efault;
8612         ret = get_errno(rmdir(p));
8613         unlock_user(p, arg1, 0);
8614         break;
8615 #endif
8616     case TARGET_NR_dup:
8617         ret = get_errno(dup(arg1));
8618         if (ret >= 0) {
8619             fd_trans_dup(arg1, ret);
8620         }
8621         break;
8622 #ifdef TARGET_NR_pipe
8623     case TARGET_NR_pipe:
8624         ret = do_pipe(cpu_env, arg1, 0, 0);
8625         break;
8626 #endif
8627 #ifdef TARGET_NR_pipe2
8628     case TARGET_NR_pipe2:
8629         ret = do_pipe(cpu_env, arg1,
8630                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8631         break;
8632 #endif
8633     case TARGET_NR_times:
8634         {
8635             struct target_tms *tmsp;
8636             struct tms tms;
8637             ret = get_errno(times(&tms));
8638             if (arg1) {
8639                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8640                 if (!tmsp)
8641                     goto efault;
8642                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8643                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8644                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8645                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8646             }
8647             if (!is_error(ret))
8648                 ret = host_to_target_clock_t(ret);
8649         }
8650         break;
8651 #ifdef TARGET_NR_prof
8652     case TARGET_NR_prof:
8653         goto unimplemented;
8654 #endif
8655 #ifdef TARGET_NR_signal
8656     case TARGET_NR_signal:
8657         goto unimplemented;
8658 #endif
8659     case TARGET_NR_acct:
8660         if (arg1 == 0) {
8661             ret = get_errno(acct(NULL));
8662         } else {
8663             if (!(p = lock_user_string(arg1)))
8664                 goto efault;
8665             ret = get_errno(acct(path(p)));
8666             unlock_user(p, arg1, 0);
8667         }
8668         break;
8669 #ifdef TARGET_NR_umount2
8670     case TARGET_NR_umount2:
8671         if (!(p = lock_user_string(arg1)))
8672             goto efault;
8673         ret = get_errno(umount2(p, arg2));
8674         unlock_user(p, arg1, 0);
8675         break;
8676 #endif
8677 #ifdef TARGET_NR_lock
8678     case TARGET_NR_lock:
8679         goto unimplemented;
8680 #endif
8681     case TARGET_NR_ioctl:
8682         ret = do_ioctl(arg1, arg2, arg3);
8683         break;
8684 #ifdef TARGET_NR_fcntl
8685     case TARGET_NR_fcntl:
8686         ret = do_fcntl(arg1, arg2, arg3);
8687         break;
8688 #endif
8689 #ifdef TARGET_NR_mpx
8690     case TARGET_NR_mpx:
8691         goto unimplemented;
8692 #endif
8693     case TARGET_NR_setpgid:
8694         ret = get_errno(setpgid(arg1, arg2));
8695         break;
8696 #ifdef TARGET_NR_ulimit
8697     case TARGET_NR_ulimit:
8698         goto unimplemented;
8699 #endif
8700 #ifdef TARGET_NR_oldolduname
8701     case TARGET_NR_oldolduname:
8702         goto unimplemented;
8703 #endif
8704     case TARGET_NR_umask:
8705         ret = get_errno(umask(arg1));
8706         break;
8707     case TARGET_NR_chroot:
8708         if (!(p = lock_user_string(arg1)))
8709             goto efault;
8710         ret = get_errno(chroot(p));
8711         unlock_user(p, arg1, 0);
8712         break;
8713 #ifdef TARGET_NR_ustat
8714     case TARGET_NR_ustat:
8715         goto unimplemented;
8716 #endif
8717 #ifdef TARGET_NR_dup2
8718     case TARGET_NR_dup2:
8719         ret = get_errno(dup2(arg1, arg2));
8720         if (ret >= 0) {
8721             fd_trans_dup(arg1, arg2);
8722         }
8723         break;
8724 #endif
8725 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8726     case TARGET_NR_dup3:
8727     {
8728         int host_flags;
8729 
8730         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8731             return -EINVAL;
8732         }
8733         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8734         ret = get_errno(dup3(arg1, arg2, host_flags));
8735         if (ret >= 0) {
8736             fd_trans_dup(arg1, arg2);
8737         }
8738         break;
8739     }
8740 #endif
8741 #ifdef TARGET_NR_getppid /* not on alpha */
8742     case TARGET_NR_getppid:
8743         ret = get_errno(getppid());
8744         break;
8745 #endif
8746 #ifdef TARGET_NR_getpgrp
8747     case TARGET_NR_getpgrp:
8748         ret = get_errno(getpgrp());
8749         break;
8750 #endif
8751     case TARGET_NR_setsid:
8752         ret = get_errno(setsid());
8753         break;
8754 #ifdef TARGET_NR_sigaction
8755     case TARGET_NR_sigaction:
8756         {
8757 #if defined(TARGET_ALPHA)
8758             struct target_sigaction act, oact, *pact = 0;
8759             struct target_old_sigaction *old_act;
8760             if (arg2) {
8761                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8762                     goto efault;
8763                 act._sa_handler = old_act->_sa_handler;
8764                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8765                 act.sa_flags = old_act->sa_flags;
8766                 act.sa_restorer = 0;
8767                 unlock_user_struct(old_act, arg2, 0);
8768                 pact = &act;
8769             }
8770             ret = get_errno(do_sigaction(arg1, pact, &oact));
8771             if (!is_error(ret) && arg3) {
8772                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8773                     goto efault;
8774                 old_act->_sa_handler = oact._sa_handler;
8775                 old_act->sa_mask = oact.sa_mask.sig[0];
8776                 old_act->sa_flags = oact.sa_flags;
8777                 unlock_user_struct(old_act, arg3, 1);
8778             }
8779 #elif defined(TARGET_MIPS)
8780 	    struct target_sigaction act, oact, *pact, *old_act;
8781 
8782 	    if (arg2) {
8783                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8784                     goto efault;
8785 		act._sa_handler = old_act->_sa_handler;
8786 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8787 		act.sa_flags = old_act->sa_flags;
8788 		unlock_user_struct(old_act, arg2, 0);
8789 		pact = &act;
8790 	    } else {
8791 		pact = NULL;
8792 	    }
8793 
8794 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8795 
8796 	    if (!is_error(ret) && arg3) {
8797                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8798                     goto efault;
8799 		old_act->_sa_handler = oact._sa_handler;
8800 		old_act->sa_flags = oact.sa_flags;
8801 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8802 		old_act->sa_mask.sig[1] = 0;
8803 		old_act->sa_mask.sig[2] = 0;
8804 		old_act->sa_mask.sig[3] = 0;
8805 		unlock_user_struct(old_act, arg3, 1);
8806 	    }
8807 #else
8808             struct target_old_sigaction *old_act;
8809             struct target_sigaction act, oact, *pact;
8810             if (arg2) {
8811                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8812                     goto efault;
8813                 act._sa_handler = old_act->_sa_handler;
8814                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8815                 act.sa_flags = old_act->sa_flags;
8816                 act.sa_restorer = old_act->sa_restorer;
8817 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8818                 act.ka_restorer = 0;
8819 #endif
8820                 unlock_user_struct(old_act, arg2, 0);
8821                 pact = &act;
8822             } else {
8823                 pact = NULL;
8824             }
8825             ret = get_errno(do_sigaction(arg1, pact, &oact));
8826             if (!is_error(ret) && arg3) {
8827                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8828                     goto efault;
8829                 old_act->_sa_handler = oact._sa_handler;
8830                 old_act->sa_mask = oact.sa_mask.sig[0];
8831                 old_act->sa_flags = oact.sa_flags;
8832                 old_act->sa_restorer = oact.sa_restorer;
8833                 unlock_user_struct(old_act, arg3, 1);
8834             }
8835 #endif
8836         }
8837         break;
8838 #endif
8839     case TARGET_NR_rt_sigaction:
8840         {
8841 #if defined(TARGET_ALPHA)
8842             /* For Alpha and SPARC this is a 5 argument syscall, with
8843              * a 'restorer' parameter which must be copied into the
8844              * sa_restorer field of the sigaction struct.
8845              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8846              * and arg5 is the sigsetsize.
8847              * Alpha also has a separate rt_sigaction struct that it uses
8848              * here; SPARC uses the usual sigaction struct.
8849              */
8850             struct target_rt_sigaction *rt_act;
8851             struct target_sigaction act, oact, *pact = 0;
8852 
8853             if (arg4 != sizeof(target_sigset_t)) {
8854                 ret = -TARGET_EINVAL;
8855                 break;
8856             }
8857             if (arg2) {
8858                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8859                     goto efault;
8860                 act._sa_handler = rt_act->_sa_handler;
8861                 act.sa_mask = rt_act->sa_mask;
8862                 act.sa_flags = rt_act->sa_flags;
8863                 act.sa_restorer = arg5;
8864                 unlock_user_struct(rt_act, arg2, 0);
8865                 pact = &act;
8866             }
8867             ret = get_errno(do_sigaction(arg1, pact, &oact));
8868             if (!is_error(ret) && arg3) {
8869                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8870                     goto efault;
8871                 rt_act->_sa_handler = oact._sa_handler;
8872                 rt_act->sa_mask = oact.sa_mask;
8873                 rt_act->sa_flags = oact.sa_flags;
8874                 unlock_user_struct(rt_act, arg3, 1);
8875             }
8876 #else
8877 #ifdef TARGET_SPARC
8878             target_ulong restorer = arg4;
8879             target_ulong sigsetsize = arg5;
8880 #else
8881             target_ulong sigsetsize = arg4;
8882 #endif
8883             struct target_sigaction *act;
8884             struct target_sigaction *oact;
8885 
8886             if (sigsetsize != sizeof(target_sigset_t)) {
8887                 ret = -TARGET_EINVAL;
8888                 break;
8889             }
8890             if (arg2) {
8891                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8892                     goto efault;
8893                 }
8894 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8895                 act->ka_restorer = restorer;
8896 #endif
8897             } else {
8898                 act = NULL;
8899             }
8900             if (arg3) {
8901                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8902                     ret = -TARGET_EFAULT;
8903                     goto rt_sigaction_fail;
8904                 }
8905             } else
8906                 oact = NULL;
8907             ret = get_errno(do_sigaction(arg1, act, oact));
8908 	rt_sigaction_fail:
8909             if (act)
8910                 unlock_user_struct(act, arg2, 0);
8911             if (oact)
8912                 unlock_user_struct(oact, arg3, 1);
8913 #endif
8914         }
8915         break;
8916 #ifdef TARGET_NR_sgetmask /* not on alpha */
8917     case TARGET_NR_sgetmask:
8918         {
8919             sigset_t cur_set;
8920             abi_ulong target_set;
8921             ret = do_sigprocmask(0, NULL, &cur_set);
8922             if (!ret) {
8923                 host_to_target_old_sigset(&target_set, &cur_set);
8924                 ret = target_set;
8925             }
8926         }
8927         break;
8928 #endif
8929 #ifdef TARGET_NR_ssetmask /* not on alpha */
8930     case TARGET_NR_ssetmask:
8931         {
8932             sigset_t set, oset;
8933             abi_ulong target_set = arg1;
8934             target_to_host_old_sigset(&set, &target_set);
8935             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8936             if (!ret) {
8937                 host_to_target_old_sigset(&target_set, &oset);
8938                 ret = target_set;
8939             }
8940         }
8941         break;
8942 #endif
8943 #ifdef TARGET_NR_sigprocmask
8944     case TARGET_NR_sigprocmask:
8945         {
8946 #if defined(TARGET_ALPHA)
8947             sigset_t set, oldset;
8948             abi_ulong mask;
8949             int how;
8950 
8951             switch (arg1) {
8952             case TARGET_SIG_BLOCK:
8953                 how = SIG_BLOCK;
8954                 break;
8955             case TARGET_SIG_UNBLOCK:
8956                 how = SIG_UNBLOCK;
8957                 break;
8958             case TARGET_SIG_SETMASK:
8959                 how = SIG_SETMASK;
8960                 break;
8961             default:
8962                 ret = -TARGET_EINVAL;
8963                 goto fail;
8964             }
8965             mask = arg2;
8966             target_to_host_old_sigset(&set, &mask);
8967 
8968             ret = do_sigprocmask(how, &set, &oldset);
8969             if (!is_error(ret)) {
8970                 host_to_target_old_sigset(&mask, &oldset);
8971                 ret = mask;
8972                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8973             }
8974 #else
8975             sigset_t set, oldset, *set_ptr;
8976             int how;
8977 
8978             if (arg2) {
8979                 switch (arg1) {
8980                 case TARGET_SIG_BLOCK:
8981                     how = SIG_BLOCK;
8982                     break;
8983                 case TARGET_SIG_UNBLOCK:
8984                     how = SIG_UNBLOCK;
8985                     break;
8986                 case TARGET_SIG_SETMASK:
8987                     how = SIG_SETMASK;
8988                     break;
8989                 default:
8990                     ret = -TARGET_EINVAL;
8991                     goto fail;
8992                 }
8993                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8994                     goto efault;
8995                 target_to_host_old_sigset(&set, p);
8996                 unlock_user(p, arg2, 0);
8997                 set_ptr = &set;
8998             } else {
8999                 how = 0;
9000                 set_ptr = NULL;
9001             }
9002             ret = do_sigprocmask(how, set_ptr, &oldset);
9003             if (!is_error(ret) && arg3) {
9004                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9005                     goto efault;
9006                 host_to_target_old_sigset(p, &oldset);
9007                 unlock_user(p, arg3, sizeof(target_sigset_t));
9008             }
9009 #endif
9010         }
9011         break;
9012 #endif
9013     case TARGET_NR_rt_sigprocmask:
9014         {
9015             int how = arg1;
9016             sigset_t set, oldset, *set_ptr;
9017 
9018             if (arg4 != sizeof(target_sigset_t)) {
9019                 ret = -TARGET_EINVAL;
9020                 break;
9021             }
9022 
9023             if (arg2) {
9024                 switch(how) {
9025                 case TARGET_SIG_BLOCK:
9026                     how = SIG_BLOCK;
9027                     break;
9028                 case TARGET_SIG_UNBLOCK:
9029                     how = SIG_UNBLOCK;
9030                     break;
9031                 case TARGET_SIG_SETMASK:
9032                     how = SIG_SETMASK;
9033                     break;
9034                 default:
9035                     ret = -TARGET_EINVAL;
9036                     goto fail;
9037                 }
9038                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9039                     goto efault;
9040                 target_to_host_sigset(&set, p);
9041                 unlock_user(p, arg2, 0);
9042                 set_ptr = &set;
9043             } else {
9044                 how = 0;
9045                 set_ptr = NULL;
9046             }
9047             ret = do_sigprocmask(how, set_ptr, &oldset);
9048             if (!is_error(ret) && arg3) {
9049                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9050                     goto efault;
9051                 host_to_target_sigset(p, &oldset);
9052                 unlock_user(p, arg3, sizeof(target_sigset_t));
9053             }
9054         }
9055         break;
9056 #ifdef TARGET_NR_sigpending
9057     case TARGET_NR_sigpending:
9058         {
9059             sigset_t set;
9060             ret = get_errno(sigpending(&set));
9061             if (!is_error(ret)) {
9062                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9063                     goto efault;
9064                 host_to_target_old_sigset(p, &set);
9065                 unlock_user(p, arg1, sizeof(target_sigset_t));
9066             }
9067         }
9068         break;
9069 #endif
9070     case TARGET_NR_rt_sigpending:
9071         {
9072             sigset_t set;
9073 
9074             /* Yes, this check is >, not != like most. We follow the kernel's
9075              * logic here: it implements NR_sigpending through the same
9076              * code path, and in that case the old_sigset_t is smaller
9077              * in size.
9078              */
9079             if (arg2 > sizeof(target_sigset_t)) {
9080                 ret = -TARGET_EINVAL;
9081                 break;
9082             }
9083 
9084             ret = get_errno(sigpending(&set));
9085             if (!is_error(ret)) {
9086                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9087                     goto efault;
9088                 host_to_target_sigset(p, &set);
9089                 unlock_user(p, arg1, sizeof(target_sigset_t));
9090             }
9091         }
9092         break;
9093 #ifdef TARGET_NR_sigsuspend
9094     case TARGET_NR_sigsuspend:
9095         {
9096             TaskState *ts = cpu->opaque;
9097 #if defined(TARGET_ALPHA)
9098             abi_ulong mask = arg1;
9099             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9100 #else
9101             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9102                 goto efault;
9103             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9104             unlock_user(p, arg1, 0);
9105 #endif
9106             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9107                                                SIGSET_T_SIZE));
9108             if (ret != -TARGET_ERESTARTSYS) {
9109                 ts->in_sigsuspend = 1;
9110             }
9111         }
9112         break;
9113 #endif
9114     case TARGET_NR_rt_sigsuspend:
9115         {
9116             TaskState *ts = cpu->opaque;
9117 
9118             if (arg2 != sizeof(target_sigset_t)) {
9119                 ret = -TARGET_EINVAL;
9120                 break;
9121             }
9122             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9123                 goto efault;
9124             target_to_host_sigset(&ts->sigsuspend_mask, p);
9125             unlock_user(p, arg1, 0);
9126             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9127                                                SIGSET_T_SIZE));
9128             if (ret != -TARGET_ERESTARTSYS) {
9129                 ts->in_sigsuspend = 1;
9130             }
9131         }
9132         break;
9133     case TARGET_NR_rt_sigtimedwait:
9134         {
9135             sigset_t set;
9136             struct timespec uts, *puts;
9137             siginfo_t uinfo;
9138 
9139             if (arg4 != sizeof(target_sigset_t)) {
9140                 ret = -TARGET_EINVAL;
9141                 break;
9142             }
9143 
9144             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9145                 goto efault;
9146             target_to_host_sigset(&set, p);
9147             unlock_user(p, arg1, 0);
9148             if (arg3) {
9149                 puts = &uts;
9150                 target_to_host_timespec(puts, arg3);
9151             } else {
9152                 puts = NULL;
9153             }
9154             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9155                                                  SIGSET_T_SIZE));
9156             if (!is_error(ret)) {
9157                 if (arg2) {
9158                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9159                                   0);
9160                     if (!p) {
9161                         goto efault;
9162                     }
9163                     host_to_target_siginfo(p, &uinfo);
9164                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9165                 }
9166                 ret = host_to_target_signal(ret);
9167             }
9168         }
9169         break;
9170     case TARGET_NR_rt_sigqueueinfo:
9171         {
9172             siginfo_t uinfo;
9173 
9174             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9175             if (!p) {
9176                 goto efault;
9177             }
9178             target_to_host_siginfo(&uinfo, p);
9179             unlock_user(p, arg3, 0);
9180             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9181         }
9182         break;
9183     case TARGET_NR_rt_tgsigqueueinfo:
9184         {
9185             siginfo_t uinfo;
9186 
9187             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9188             if (!p) {
9189                 goto efault;
9190             }
9191             target_to_host_siginfo(&uinfo, p);
9192             unlock_user(p, arg4, 0);
9193             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9194         }
9195         break;
9196 #ifdef TARGET_NR_sigreturn
9197     case TARGET_NR_sigreturn:
9198         if (block_signals()) {
9199             ret = -TARGET_ERESTARTSYS;
9200         } else {
9201             ret = do_sigreturn(cpu_env);
9202         }
9203         break;
9204 #endif
9205     case TARGET_NR_rt_sigreturn:
9206         if (block_signals()) {
9207             ret = -TARGET_ERESTARTSYS;
9208         } else {
9209             ret = do_rt_sigreturn(cpu_env);
9210         }
9211         break;
9212     case TARGET_NR_sethostname:
9213         if (!(p = lock_user_string(arg1)))
9214             goto efault;
9215         ret = get_errno(sethostname(p, arg2));
9216         unlock_user(p, arg1, 0);
9217         break;
9218     case TARGET_NR_setrlimit:
9219         {
9220             int resource = target_to_host_resource(arg1);
9221             struct target_rlimit *target_rlim;
9222             struct rlimit rlim;
9223             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9224                 goto efault;
9225             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9226             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9227             unlock_user_struct(target_rlim, arg2, 0);
9228             ret = get_errno(setrlimit(resource, &rlim));
9229         }
9230         break;
9231     case TARGET_NR_getrlimit:
9232         {
9233             int resource = target_to_host_resource(arg1);
9234             struct target_rlimit *target_rlim;
9235             struct rlimit rlim;
9236 
9237             ret = get_errno(getrlimit(resource, &rlim));
9238             if (!is_error(ret)) {
9239                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9240                     goto efault;
9241                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9242                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9243                 unlock_user_struct(target_rlim, arg2, 1);
9244             }
9245         }
9246         break;
9247     case TARGET_NR_getrusage:
9248         {
9249             struct rusage rusage;
9250             ret = get_errno(getrusage(arg1, &rusage));
9251             if (!is_error(ret)) {
9252                 ret = host_to_target_rusage(arg2, &rusage);
9253             }
9254         }
9255         break;
9256     case TARGET_NR_gettimeofday:
9257         {
9258             struct timeval tv;
9259             ret = get_errno(gettimeofday(&tv, NULL));
9260             if (!is_error(ret)) {
9261                 if (copy_to_user_timeval(arg1, &tv))
9262                     goto efault;
9263             }
9264         }
9265         break;
9266     case TARGET_NR_settimeofday:
9267         {
9268             struct timeval tv, *ptv = NULL;
9269             struct timezone tz, *ptz = NULL;
9270 
9271             if (arg1) {
9272                 if (copy_from_user_timeval(&tv, arg1)) {
9273                     goto efault;
9274                 }
9275                 ptv = &tv;
9276             }
9277 
9278             if (arg2) {
9279                 if (copy_from_user_timezone(&tz, arg2)) {
9280                     goto efault;
9281                 }
9282                 ptz = &tz;
9283             }
9284 
9285             ret = get_errno(settimeofday(ptv, ptz));
9286         }
9287         break;
9288 #if defined(TARGET_NR_select)
9289     case TARGET_NR_select:
9290 #if defined(TARGET_WANT_NI_OLD_SELECT)
9291         /* some architectures used to have old_select here
9292          * but now return ENOSYS for it.
9293          */
9294         ret = -TARGET_ENOSYS;
9295 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9296         ret = do_old_select(arg1);
9297 #else
9298         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9299 #endif
9300         break;
9301 #endif
9302 #ifdef TARGET_NR_pselect6
9303     case TARGET_NR_pselect6:
9304         {
9305             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9306             fd_set rfds, wfds, efds;
9307             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9308             struct timespec ts, *ts_ptr;
9309 
9310             /*
9311              * The 6th arg is actually two args smashed together,
9312              * so we cannot use the C library.
9313              */
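                 /* Concretely, arg6 is a guest pointer to two abi_ulong words:
                  * the address of the target sigset and its size.  Both are
                  * read out below with lock_user() before the sigset itself is
                  * converted. */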
9314             sigset_t set;
9315             struct {
9316                 sigset_t *set;
9317                 size_t size;
9318             } sig, *sig_ptr;
9319 
9320             abi_ulong arg_sigset, arg_sigsize, *arg7;
9321             target_sigset_t *target_sigset;
9322 
9323             n = arg1;
9324             rfd_addr = arg2;
9325             wfd_addr = arg3;
9326             efd_addr = arg4;
9327             ts_addr = arg5;
9328 
9329             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9330             if (ret) {
9331                 goto fail;
9332             }
9333             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9334             if (ret) {
9335                 goto fail;
9336             }
9337             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9338             if (ret) {
9339                 goto fail;
9340             }
9341 
9342             /*
9343              * This takes a timespec, and not a timeval, so we cannot
9344              * use the do_select() helper ...
9345              */
9346             if (ts_addr) {
9347                 if (target_to_host_timespec(&ts, ts_addr)) {
9348                     goto efault;
9349                 }
9350                 ts_ptr = &ts;
9351             } else {
9352                 ts_ptr = NULL;
9353             }
9354 
9355             /* Extract the two packed args for the sigset */
9356             if (arg6) {
9357                 sig_ptr = &sig;
9358                 sig.size = SIGSET_T_SIZE;
9359 
9360                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9361                 if (!arg7) {
9362                     goto efault;
9363                 }
9364                 arg_sigset = tswapal(arg7[0]);
9365                 arg_sigsize = tswapal(arg7[1]);
9366                 unlock_user(arg7, arg6, 0);
9367 
9368                 if (arg_sigset) {
9369                     sig.set = &set;
9370                     if (arg_sigsize != sizeof(*target_sigset)) {
9371                         /* Like the kernel, we enforce correct size sigsets */
9372                         ret = -TARGET_EINVAL;
9373                         goto fail;
9374                     }
9375                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9376                                               sizeof(*target_sigset), 1);
9377                     if (!target_sigset) {
9378                         goto efault;
9379                     }
9380                     target_to_host_sigset(&set, target_sigset);
9381                     unlock_user(target_sigset, arg_sigset, 0);
9382                 } else {
9383                     sig.set = NULL;
9384                 }
9385             } else {
9386                 sig_ptr = NULL;
9387             }
9388 
9389             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9390                                           ts_ptr, sig_ptr));
9391 
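            /* pselect modifies the fd_sets (and the timeout) in place, so on
             * success copy the results back out to guest memory. */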
9392             if (!is_error(ret)) {
9393                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9394                     goto efault;
9395                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9396                     goto efault;
9397                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9398                     goto efault;
9399 
9400                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9401                     goto efault;
9402             }
9403         }
9404         break;
9405 #endif
9406 #ifdef TARGET_NR_symlink
9407     case TARGET_NR_symlink:
9408         {
9409             void *p2;
9410             p = lock_user_string(arg1);
9411             p2 = lock_user_string(arg2);
9412             if (!p || !p2)
9413                 ret = -TARGET_EFAULT;
9414             else
9415                 ret = get_errno(symlink(p, p2));
9416             unlock_user(p2, arg2, 0);
9417             unlock_user(p, arg1, 0);
9418         }
9419         break;
9420 #endif
9421 #if defined(TARGET_NR_symlinkat)
9422     case TARGET_NR_symlinkat:
9423         {
9424             void *p2;
9425             p  = lock_user_string(arg1);
9426             p2 = lock_user_string(arg3);
9427             if (!p || !p2)
9428                 ret = -TARGET_EFAULT;
9429             else
9430                 ret = get_errno(symlinkat(p, arg2, p2));
9431             unlock_user(p2, arg3, 0);
9432             unlock_user(p, arg1, 0);
9433         }
9434         break;
9435 #endif
9436 #ifdef TARGET_NR_oldlstat
9437     case TARGET_NR_oldlstat:
9438         goto unimplemented;
9439 #endif
9440 #ifdef TARGET_NR_readlink
9441     case TARGET_NR_readlink:
9442         {
9443             void *p2;
9444             p = lock_user_string(arg1);
9445             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9446             if (!p || !p2) {
9447                 ret = -TARGET_EFAULT;
9448             } else if (!arg3) {
9449                 /* Short circuit this for the magic exe check. */
9450                 ret = -TARGET_EINVAL;
9451             } else if (is_proc_myself((const char *)p, "exe")) {
9452                 char real[PATH_MAX], *temp;
9453                 temp = realpath(exec_path, real);
9454                 /* Return value is # of bytes that we wrote to the buffer. */
9455                 if (temp == NULL) {
9456                     ret = get_errno(-1);
9457                 } else {
9458                     /* Don't worry about sign mismatch as earlier mapping
9459                      * logic would have thrown a bad address error. */
9460                     ret = MIN(strlen(real), arg3);
9461                     /* Like readlink(2), we do not NUL terminate the string. */
9462                     memcpy(p2, real, ret);
9463                 }
9464             } else {
9465                 ret = get_errno(readlink(path(p), p2, arg3));
9466             }
9467             unlock_user(p2, arg2, ret);
9468             unlock_user(p, arg1, 0);
9469         }
9470         break;
9471 #endif
9472 #if defined(TARGET_NR_readlinkat)
9473     case TARGET_NR_readlinkat:
9474         {
9475             void *p2;
9476             p  = lock_user_string(arg2);
9477             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9478             if (!p || !p2) {
9479                 ret = -TARGET_EFAULT;
9480             } else if (is_proc_myself((const char *)p, "exe")) {
9481                 char real[PATH_MAX], *temp;
9482                 temp = realpath(exec_path, real);
9483                 if (temp == NULL) {
9484                     ret = get_errno(-1);
                } else {
                    /* As in the readlink case above: return the number of
                     * bytes copied, truncated to the buffer size and without
                     * NUL termination. */
                    ret = MIN(strlen(real), arg4);
                    memcpy(p2, real, ret);
                }
9485             } else {
9486                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9487             }
9488             unlock_user(p2, arg3, ret);
9489             unlock_user(p, arg2, 0);
9490         }
9491         break;
9492 #endif
9493 #ifdef TARGET_NR_uselib
9494     case TARGET_NR_uselib:
9495         goto unimplemented;
9496 #endif
9497 #ifdef TARGET_NR_swapon
9498     case TARGET_NR_swapon:
9499         if (!(p = lock_user_string(arg1)))
9500             goto efault;
9501         ret = get_errno(swapon(p, arg2));
9502         unlock_user(p, arg1, 0);
9503         break;
9504 #endif
9505     case TARGET_NR_reboot:
9506         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9507            /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2;
                * it must be ignored in all other cases */
9508            p = lock_user_string(arg4);
9509            if (!p) {
9510               goto efault;
9511            }
9512            ret = get_errno(reboot(arg1, arg2, arg3, p));
9513            unlock_user(p, arg4, 0);
9514         } else {
9515            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9516         }
9517         break;
9518 #ifdef TARGET_NR_readdir
9519     case TARGET_NR_readdir:
9520         goto unimplemented;
9521 #endif
9522 #ifdef TARGET_NR_mmap
9523     case TARGET_NR_mmap:
9524 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9525     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9526     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9527     || defined(TARGET_S390X)
9528         {
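            /* On these targets the old mmap syscall takes a single pointer
             * to a block of six arguments held in guest memory. */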
9529             abi_ulong *v;
9530             abi_ulong v1, v2, v3, v4, v5, v6;
9531             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9532                 goto efault;
9533             v1 = tswapal(v[0]);
9534             v2 = tswapal(v[1]);
9535             v3 = tswapal(v[2]);
9536             v4 = tswapal(v[3]);
9537             v5 = tswapal(v[4]);
9538             v6 = tswapal(v[5]);
9539             unlock_user(v, arg1, 0);
9540             ret = get_errno(target_mmap(v1, v2, v3,
9541                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9542                                         v5, v6));
9543         }
9544 #else
9545         ret = get_errno(target_mmap(arg1, arg2, arg3,
9546                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9547                                     arg5,
9548                                     arg6));
9549 #endif
9550         break;
9551 #endif
9552 #ifdef TARGET_NR_mmap2
9553     case TARGET_NR_mmap2:
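        /* mmap2 passes the file offset in 4096-byte units rather than bytes,
         * hence the MMAP_SHIFT applied to arg6 below. */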
9554 #ifndef MMAP_SHIFT
9555 #define MMAP_SHIFT 12
9556 #endif
9557         ret = get_errno(target_mmap(arg1, arg2, arg3,
9558                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9559                                     arg5,
9560                                     arg6 << MMAP_SHIFT));
9561         break;
9562 #endif
9563     case TARGET_NR_munmap:
9564         ret = get_errno(target_munmap(arg1, arg2));
9565         break;
9566     case TARGET_NR_mprotect:
9567         {
9568             TaskState *ts = cpu->opaque;
9569             /* Special hack to detect libc making the stack executable.  */
9570             if ((arg3 & PROT_GROWSDOWN)
9571                 && arg1 >= ts->info->stack_limit
9572                 && arg1 <= ts->info->start_stack) {
9573                 arg3 &= ~PROT_GROWSDOWN;
9574                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9575                 arg1 = ts->info->stack_limit;
9576             }
9577         }
9578         ret = get_errno(target_mprotect(arg1, arg2, arg3));
9579         break;
9580 #ifdef TARGET_NR_mremap
9581     case TARGET_NR_mremap:
9582         ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9583         break;
9584 #endif
9585         /* ??? msync/mlock/munlock are broken for softmmu.  */
9586 #ifdef TARGET_NR_msync
9587     case TARGET_NR_msync:
9588         ret = get_errno(msync(g2h(arg1), arg2, arg3));
9589         break;
9590 #endif
9591 #ifdef TARGET_NR_mlock
9592     case TARGET_NR_mlock:
9593         ret = get_errno(mlock(g2h(arg1), arg2));
9594         break;
9595 #endif
9596 #ifdef TARGET_NR_munlock
9597     case TARGET_NR_munlock:
9598         ret = get_errno(munlock(g2h(arg1), arg2));
9599         break;
9600 #endif
9601 #ifdef TARGET_NR_mlockall
9602     case TARGET_NR_mlockall:
9603         ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9604         break;
9605 #endif
9606 #ifdef TARGET_NR_munlockall
9607     case TARGET_NR_munlockall:
9608         ret = get_errno(munlockall());
9609         break;
9610 #endif
9611     case TARGET_NR_truncate:
9612         if (!(p = lock_user_string(arg1)))
9613             goto efault;
9614         ret = get_errno(truncate(p, arg2));
9615         unlock_user(p, arg1, 0);
9616         break;
9617     case TARGET_NR_ftruncate:
9618         ret = get_errno(ftruncate(arg1, arg2));
9619         break;
9620     case TARGET_NR_fchmod:
9621         ret = get_errno(fchmod(arg1, arg2));
9622         break;
9623 #if defined(TARGET_NR_fchmodat)
9624     case TARGET_NR_fchmodat:
9625         if (!(p = lock_user_string(arg2)))
9626             goto efault;
9627         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9628         unlock_user(p, arg2, 0);
9629         break;
9630 #endif
9631     case TARGET_NR_getpriority:
9632         /* Note that negative values are valid for getpriority, so we must
9633            differentiate based on errno settings.  */
9634         errno = 0;
9635         ret = getpriority(arg1, arg2);
9636         if (ret == -1 && errno != 0) {
9637             ret = -host_to_target_errno(errno);
9638             break;
9639         }
9640 #ifdef TARGET_ALPHA
9641         /* Return value is the unbiased priority.  Signal no error.  */
9642         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9643 #else
9644         /* Return value is a biased priority to avoid negative numbers.  */
9645         ret = 20 - ret;
9646 #endif
9647         break;
9648     case TARGET_NR_setpriority:
9649         ret = get_errno(setpriority(arg1, arg2, arg3));
9650         break;
9651 #ifdef TARGET_NR_profil
9652     case TARGET_NR_profil:
9653         goto unimplemented;
9654 #endif
9655     case TARGET_NR_statfs:
9656         if (!(p = lock_user_string(arg1)))
9657             goto efault;
9658         ret = get_errno(statfs(path(p), &stfs));
9659         unlock_user(p, arg1, 0);
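    /* TARGET_NR_fstatfs below shares this conversion code via the label. */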
9660     convert_statfs:
9661         if (!is_error(ret)) {
9662             struct target_statfs *target_stfs;
9663 
9664             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9665                 goto efault;
9666             __put_user(stfs.f_type, &target_stfs->f_type);
9667             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9668             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9669             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9670             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9671             __put_user(stfs.f_files, &target_stfs->f_files);
9672             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9673             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9674             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9675             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9676             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9677 #ifdef _STATFS_F_FLAGS
9678             __put_user(stfs.f_flags, &target_stfs->f_flags);
9679 #else
9680             __put_user(0, &target_stfs->f_flags);
9681 #endif
9682             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9683             unlock_user_struct(target_stfs, arg2, 1);
9684         }
9685         break;
9686     case TARGET_NR_fstatfs:
9687         ret = get_errno(fstatfs(arg1, &stfs));
9688         goto convert_statfs;
9689 #ifdef TARGET_NR_statfs64
9690     case TARGET_NR_statfs64:
9691         if (!(p = lock_user_string(arg1)))
9692             goto efault;
9693         ret = get_errno(statfs(path(p), &stfs));
9694         unlock_user(p, arg1, 0);
9695     convert_statfs64:
9696         if (!is_error(ret)) {
9697             struct target_statfs64 *target_stfs;
9698 
9699             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9700                 goto efault;
9701             __put_user(stfs.f_type, &target_stfs->f_type);
9702             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9703             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9704             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9705             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9706             __put_user(stfs.f_files, &target_stfs->f_files);
9707             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9708             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9709             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9710             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9711             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9712             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9713             unlock_user_struct(target_stfs, arg3, 1);
9714         }
9715         break;
9716     case TARGET_NR_fstatfs64:
9717         ret = get_errno(fstatfs(arg1, &stfs));
9718         goto convert_statfs64;
9719 #endif
9720 #ifdef TARGET_NR_ioperm
9721     case TARGET_NR_ioperm:
9722         goto unimplemented;
9723 #endif
9724 #ifdef TARGET_NR_socketcall
9725     case TARGET_NR_socketcall:
9726         ret = do_socketcall(arg1, arg2);
9727         break;
9728 #endif
9729 #ifdef TARGET_NR_accept
9730     case TARGET_NR_accept:
9731         ret = do_accept4(arg1, arg2, arg3, 0);
9732         break;
9733 #endif
9734 #ifdef TARGET_NR_accept4
9735     case TARGET_NR_accept4:
9736         ret = do_accept4(arg1, arg2, arg3, arg4);
9737         break;
9738 #endif
9739 #ifdef TARGET_NR_bind
9740     case TARGET_NR_bind:
9741         ret = do_bind(arg1, arg2, arg3);
9742         break;
9743 #endif
9744 #ifdef TARGET_NR_connect
9745     case TARGET_NR_connect:
9746         ret = do_connect(arg1, arg2, arg3);
9747         break;
9748 #endif
9749 #ifdef TARGET_NR_getpeername
9750     case TARGET_NR_getpeername:
9751         ret = do_getpeername(arg1, arg2, arg3);
9752         break;
9753 #endif
9754 #ifdef TARGET_NR_getsockname
9755     case TARGET_NR_getsockname:
9756         ret = do_getsockname(arg1, arg2, arg3);
9757         break;
9758 #endif
9759 #ifdef TARGET_NR_getsockopt
9760     case TARGET_NR_getsockopt:
9761         ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9762         break;
9763 #endif
9764 #ifdef TARGET_NR_listen
9765     case TARGET_NR_listen:
9766         ret = get_errno(listen(arg1, arg2));
9767         break;
9768 #endif
9769 #ifdef TARGET_NR_recv
9770     case TARGET_NR_recv:
9771         ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9772         break;
9773 #endif
9774 #ifdef TARGET_NR_recvfrom
9775     case TARGET_NR_recvfrom:
9776         ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9777         break;
9778 #endif
9779 #ifdef TARGET_NR_recvmsg
9780     case TARGET_NR_recvmsg:
9781         ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9782         break;
9783 #endif
9784 #ifdef TARGET_NR_send
9785     case TARGET_NR_send:
9786         ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9787         break;
9788 #endif
9789 #ifdef TARGET_NR_sendmsg
9790     case TARGET_NR_sendmsg:
9791         ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9792         break;
9793 #endif
9794 #ifdef TARGET_NR_sendmmsg
9795     case TARGET_NR_sendmmsg:
9796         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9797         break;
9798     case TARGET_NR_recvmmsg:
9799         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9800         break;
9801 #endif
9802 #ifdef TARGET_NR_sendto
9803     case TARGET_NR_sendto:
9804         ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9805         break;
9806 #endif
9807 #ifdef TARGET_NR_shutdown
9808     case TARGET_NR_shutdown:
9809         ret = get_errno(shutdown(arg1, arg2));
9810         break;
9811 #endif
9812 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9813     case TARGET_NR_getrandom:
9814         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9815         if (!p) {
9816             goto efault;
9817         }
9818         ret = get_errno(getrandom(p, arg2, arg3));
9819         unlock_user(p, arg1, ret);
9820         break;
9821 #endif
9822 #ifdef TARGET_NR_socket
9823     case TARGET_NR_socket:
9824         ret = do_socket(arg1, arg2, arg3);
9825         break;
9826 #endif
9827 #ifdef TARGET_NR_socketpair
9828     case TARGET_NR_socketpair:
9829         ret = do_socketpair(arg1, arg2, arg3, arg4);
9830         break;
9831 #endif
9832 #ifdef TARGET_NR_setsockopt
9833     case TARGET_NR_setsockopt:
9834         ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9835         break;
9836 #endif
9837 #if defined(TARGET_NR_syslog)
9838     case TARGET_NR_syslog:
9839         {
9840             int len = arg3;
9841 
9842             switch (arg1) {
9843             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9844             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9845             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9846             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9847             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9848             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9849             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9850             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9851                 {
9852                     ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9853                 }
9854                 break;
9855             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9856             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9857             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9858                 {
9859                     ret = -TARGET_EINVAL;
9860                     if (len < 0) {
9861                         goto fail;
9862                     }
9863                     ret = 0;
9864                     if (len == 0) {
9865                         break;
9866                     }
9867                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9868                     if (!p) {
9869                         ret = -TARGET_EFAULT;
9870                         goto fail;
9871                     }
9872                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9873                     unlock_user(p, arg2, arg3);
9874                 }
9875                 break;
9876             default:
9877                 ret = -TARGET_EINVAL;
9878                 break;
9879             }
9880         }
9881         break;
9882 #endif
9883     case TARGET_NR_setitimer:
9884         {
9885             struct itimerval value, ovalue, *pvalue;
9886 
9887             if (arg2) {
9888                 pvalue = &value;
9889                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9890                     || copy_from_user_timeval(&pvalue->it_value,
9891                                               arg2 + sizeof(struct target_timeval)))
9892                     goto efault;
9893             } else {
9894                 pvalue = NULL;
9895             }
9896             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9897             if (!is_error(ret) && arg3) {
9898                 if (copy_to_user_timeval(arg3,
9899                                          &ovalue.it_interval)
9900                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9901                                             &ovalue.it_value))
9902                     goto efault;
9903             }
9904         }
9905         break;
9906     case TARGET_NR_getitimer:
9907         {
9908             struct itimerval value;
9909 
9910             ret = get_errno(getitimer(arg1, &value));
9911             if (!is_error(ret) && arg2) {
9912                 if (copy_to_user_timeval(arg2,
9913                                          &value.it_interval)
9914                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9915                                             &value.it_value))
9916                     goto efault;
9917             }
9918         }
9919         break;
9920 #ifdef TARGET_NR_stat
9921     case TARGET_NR_stat:
9922         if (!(p = lock_user_string(arg1)))
9923             goto efault;
9924         ret = get_errno(stat(path(p), &st));
9925         unlock_user(p, arg1, 0);
9926         goto do_stat;
9927 #endif
9928 #ifdef TARGET_NR_lstat
9929     case TARGET_NR_lstat:
9930         if (!(p = lock_user_string(arg1)))
9931             goto efault;
9932         ret = get_errno(lstat(path(p), &st));
9933         unlock_user(p, arg1, 0);
9934         goto do_stat;
9935 #endif
9936     case TARGET_NR_fstat:
9937         {
9938             ret = get_errno(fstat(arg1, &st));
9939 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9940         do_stat:
9941 #endif
9942             if (!is_error(ret)) {
9943                 struct target_stat *target_st;
9944 
9945                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9946                     goto efault;
9947                 memset(target_st, 0, sizeof(*target_st));
9948                 __put_user(st.st_dev, &target_st->st_dev);
9949                 __put_user(st.st_ino, &target_st->st_ino);
9950                 __put_user(st.st_mode, &target_st->st_mode);
9951                 __put_user(st.st_uid, &target_st->st_uid);
9952                 __put_user(st.st_gid, &target_st->st_gid);
9953                 __put_user(st.st_nlink, &target_st->st_nlink);
9954                 __put_user(st.st_rdev, &target_st->st_rdev);
9955                 __put_user(st.st_size, &target_st->st_size);
9956                 __put_user(st.st_blksize, &target_st->st_blksize);
9957                 __put_user(st.st_blocks, &target_st->st_blocks);
9958                 __put_user(st.st_atime, &target_st->target_st_atime);
9959                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9960                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9961                 unlock_user_struct(target_st, arg2, 1);
9962             }
9963         }
9964         break;
9965 #ifdef TARGET_NR_olduname
9966     case TARGET_NR_olduname:
9967         goto unimplemented;
9968 #endif
9969 #ifdef TARGET_NR_iopl
9970     case TARGET_NR_iopl:
9971         goto unimplemented;
9972 #endif
9973     case TARGET_NR_vhangup:
9974         ret = get_errno(vhangup());
9975         break;
9976 #ifdef TARGET_NR_idle
9977     case TARGET_NR_idle:
9978         goto unimplemented;
9979 #endif
9980 #ifdef TARGET_NR_syscall
9981     case TARGET_NR_syscall:
9982         ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9983                          arg6, arg7, arg8, 0);
9984         break;
9985 #endif
9986     case TARGET_NR_wait4:
9987         {
9988             int status;
9989             abi_long status_ptr = arg2;
9990             struct rusage rusage, *rusage_ptr;
9991             abi_ulong target_rusage = arg4;
9992             abi_long rusage_err;
9993             if (target_rusage)
9994                 rusage_ptr = &rusage;
9995             else
9996                 rusage_ptr = NULL;
9997             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9998             if (!is_error(ret)) {
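                /* Copy the exit status back only if the guest supplied a
                 * pointer and a child was actually reaped (ret != 0). */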
9999                 if (status_ptr && ret) {
10000                     status = host_to_target_waitstatus(status);
10001                     if (put_user_s32(status, status_ptr))
10002                         goto efault;
10003                 }
10004                 if (target_rusage) {
10005                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10006                     if (rusage_err) {
10007                         ret = rusage_err;
10008                     }
10009                 }
10010             }
10011         }
10012         break;
10013 #ifdef TARGET_NR_swapoff
10014     case TARGET_NR_swapoff:
10015         if (!(p = lock_user_string(arg1)))
10016             goto efault;
10017         ret = get_errno(swapoff(p));
10018         unlock_user(p, arg1, 0);
10019         break;
10020 #endif
10021     case TARGET_NR_sysinfo:
10022         {
10023             struct target_sysinfo *target_value;
10024             struct sysinfo value;
10025             ret = get_errno(sysinfo(&value));
10026             if (!is_error(ret) && arg1)
10027             {
10028                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10029                     goto efault;
10030                 __put_user(value.uptime, &target_value->uptime);
10031                 __put_user(value.loads[0], &target_value->loads[0]);
10032                 __put_user(value.loads[1], &target_value->loads[1]);
10033                 __put_user(value.loads[2], &target_value->loads[2]);
10034                 __put_user(value.totalram, &target_value->totalram);
10035                 __put_user(value.freeram, &target_value->freeram);
10036                 __put_user(value.sharedram, &target_value->sharedram);
10037                 __put_user(value.bufferram, &target_value->bufferram);
10038                 __put_user(value.totalswap, &target_value->totalswap);
10039                 __put_user(value.freeswap, &target_value->freeswap);
10040                 __put_user(value.procs, &target_value->procs);
10041                 __put_user(value.totalhigh, &target_value->totalhigh);
10042                 __put_user(value.freehigh, &target_value->freehigh);
10043                 __put_user(value.mem_unit, &target_value->mem_unit);
10044                 unlock_user_struct(target_value, arg1, 1);
10045             }
10046         }
10047         break;
10048 #ifdef TARGET_NR_ipc
10049     case TARGET_NR_ipc:
10050         ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10051         break;
10052 #endif
10053 #ifdef TARGET_NR_semget
10054     case TARGET_NR_semget:
10055         ret = get_errno(semget(arg1, arg2, arg3));
10056         break;
10057 #endif
10058 #ifdef TARGET_NR_semop
10059     case TARGET_NR_semop:
10060         ret = do_semop(arg1, arg2, arg3);
10061         break;
10062 #endif
10063 #ifdef TARGET_NR_semctl
10064     case TARGET_NR_semctl:
10065         ret = do_semctl(arg1, arg2, arg3, arg4);
10066         break;
10067 #endif
10068 #ifdef TARGET_NR_msgctl
10069     case TARGET_NR_msgctl:
10070         ret = do_msgctl(arg1, arg2, arg3);
10071         break;
10072 #endif
10073 #ifdef TARGET_NR_msgget
10074     case TARGET_NR_msgget:
10075         ret = get_errno(msgget(arg1, arg2));
10076         break;
10077 #endif
10078 #ifdef TARGET_NR_msgrcv
10079     case TARGET_NR_msgrcv:
10080         ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10081         break;
10082 #endif
10083 #ifdef TARGET_NR_msgsnd
10084     case TARGET_NR_msgsnd:
10085         ret = do_msgsnd(arg1, arg2, arg3, arg4);
10086         break;
10087 #endif
10088 #ifdef TARGET_NR_shmget
10089     case TARGET_NR_shmget:
10090         ret = get_errno(shmget(arg1, arg2, arg3));
10091         break;
10092 #endif
10093 #ifdef TARGET_NR_shmctl
10094     case TARGET_NR_shmctl:
10095         ret = do_shmctl(arg1, arg2, arg3);
10096         break;
10097 #endif
10098 #ifdef TARGET_NR_shmat
10099     case TARGET_NR_shmat:
10100         ret = do_shmat(cpu_env, arg1, arg2, arg3);
10101         break;
10102 #endif
10103 #ifdef TARGET_NR_shmdt
10104     case TARGET_NR_shmdt:
10105         ret = do_shmdt(arg1);
10106         break;
10107 #endif
10108     case TARGET_NR_fsync:
10109         ret = get_errno(fsync(arg1));
10110         break;
10111     case TARGET_NR_clone:
10112         /* Linux manages to have three different orderings for its
10113          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10114          * match the kernel's CONFIG_CLONE_* settings.
10115          * Microblaze is further special in that it uses a sixth
10116          * implicit argument to clone for the TLS pointer.
10117          */
10118 #if defined(TARGET_MICROBLAZE)
10119         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10120 #elif defined(TARGET_CLONE_BACKWARDS)
10121         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10122 #elif defined(TARGET_CLONE_BACKWARDS2)
10123         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10124 #else
10125         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10126 #endif
10127         break;
10128 #ifdef __NR_exit_group
10129         /* new thread calls */
10130     case TARGET_NR_exit_group:
10131         preexit_cleanup(cpu_env, arg1);
10132         ret = get_errno(exit_group(arg1));
10133         break;
10134 #endif
10135     case TARGET_NR_setdomainname:
10136         if (!(p = lock_user_string(arg1)))
10137             goto efault;
10138         ret = get_errno(setdomainname(p, arg2));
10139         unlock_user(p, arg1, 0);
10140         break;
10141     case TARGET_NR_uname:
10142         /* no need to transcode because we use the linux syscall */
10143         {
10144             struct new_utsname * buf;
10145 
10146             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10147                 goto efault;
10148             ret = get_errno(sys_uname(buf));
10149             if (!is_error(ret)) {
10150                 /* Overwrite the native machine name with whatever is being
10151                    emulated. */
10152                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10153                           sizeof(buf->machine));
10154                 /* Allow the user to override the reported release.  */
10155                 if (qemu_uname_release && *qemu_uname_release) {
10156                     g_strlcpy(buf->release, qemu_uname_release,
10157                               sizeof(buf->release));
10158                 }
10159             }
10160             unlock_user_struct(buf, arg1, 1);
10161         }
10162         break;
10163 #ifdef TARGET_I386
10164     case TARGET_NR_modify_ldt:
10165         ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10166         break;
10167 #if !defined(TARGET_X86_64)
10168     case TARGET_NR_vm86old:
10169         goto unimplemented;
10170     case TARGET_NR_vm86:
10171         ret = do_vm86(cpu_env, arg1, arg2);
10172         break;
10173 #endif
10174 #endif
10175     case TARGET_NR_adjtimex:
10176         {
10177             struct timex host_buf;
10178 
10179             if (target_to_host_timex(&host_buf, arg1) != 0) {
10180                 goto efault;
10181             }
10182             ret = get_errno(adjtimex(&host_buf));
10183             if (!is_error(ret)) {
10184                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10185                     goto efault;
10186                 }
10187             }
10188         }
10189         break;
10190 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10191     case TARGET_NR_clock_adjtime:
10192         {
10193             struct timex htx, *phtx = &htx;
10194 
10195             if (target_to_host_timex(phtx, arg2) != 0) {
10196                 goto efault;
10197             }
10198             ret = get_errno(clock_adjtime(arg1, phtx));
10199             if (!is_error(ret) && phtx) {
10200                 if (host_to_target_timex(arg2, phtx) != 0) {
10201                     goto efault;
10202                 }
10203             }
10204         }
10205         break;
10206 #endif
10207 #ifdef TARGET_NR_create_module
10208     case TARGET_NR_create_module:
10209 #endif
10210     case TARGET_NR_init_module:
10211     case TARGET_NR_delete_module:
10212 #ifdef TARGET_NR_get_kernel_syms
10213     case TARGET_NR_get_kernel_syms:
10214 #endif
10215         goto unimplemented;
10216     case TARGET_NR_quotactl:
10217         goto unimplemented;
10218     case TARGET_NR_getpgid:
10219         ret = get_errno(getpgid(arg1));
10220         break;
10221     case TARGET_NR_fchdir:
10222         ret = get_errno(fchdir(arg1));
10223         break;
10224 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10225     case TARGET_NR_bdflush:
10226         goto unimplemented;
10227 #endif
10228 #ifdef TARGET_NR_sysfs
10229     case TARGET_NR_sysfs:
10230         goto unimplemented;
10231 #endif
10232     case TARGET_NR_personality:
10233         ret = get_errno(personality(arg1));
10234         break;
10235 #ifdef TARGET_NR_afs_syscall
10236     case TARGET_NR_afs_syscall:
10237         goto unimplemented;
10238 #endif
10239 #ifdef TARGET_NR__llseek /* Not on alpha */
10240     case TARGET_NR__llseek:
10241         {
10242             int64_t res;
10243 #if !defined(__NR_llseek)
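            /* Hosts without an llseek syscall have a 64-bit off_t, so rebuild
             * the offset from its two halves and use plain lseek. */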
10244             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10245             if (res == -1) {
10246                 ret = get_errno(res);
10247             } else {
10248                 ret = 0;
10249             }
10250 #else
10251             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10252 #endif
10253             if ((ret == 0) && put_user_s64(res, arg4)) {
10254                 goto efault;
10255             }
10256         }
10257         break;
10258 #endif
10259 #ifdef TARGET_NR_getdents
10260     case TARGET_NR_getdents:
10261 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10262 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
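        /* The host dirent records carry 64-bit d_ino/d_off but the target
         * expects the 32-bit layout, so read into a bounce buffer and repack
         * each record before copying it out to the guest. */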
10263         {
10264             struct target_dirent *target_dirp;
10265             struct linux_dirent *dirp;
10266             abi_long count = arg3;
10267 
10268             dirp = g_try_malloc(count);
10269             if (!dirp) {
10270                 ret = -TARGET_ENOMEM;
10271                 goto fail;
10272             }
10273 
10274             ret = get_errno(sys_getdents(arg1, dirp, count));
10275             if (!is_error(ret)) {
10276                 struct linux_dirent *de;
10277                 struct target_dirent *tde;
10278                 int len = ret;
10279                 int reclen, treclen;
10280                 int count1, tnamelen;
10281 
10282                 count1 = 0;
10283                 de = dirp;
10284                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10285                     goto efault;
10286                 tde = target_dirp;
10287                 while (len > 0) {
10288                     reclen = de->d_reclen;
10289                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10290                     assert(tnamelen >= 0);
10291                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10292                     assert(count1 + treclen <= count);
10293                     tde->d_reclen = tswap16(treclen);
10294                     tde->d_ino = tswapal(de->d_ino);
10295                     tde->d_off = tswapal(de->d_off);
10296                     memcpy(tde->d_name, de->d_name, tnamelen);
10297                     de = (struct linux_dirent *)((char *)de + reclen);
10298                     len -= reclen;
10299                     tde = (struct target_dirent *)((char *)tde + treclen);
10300                     count1 += treclen;
10301                 }
10302                 ret = count1;
10303                 unlock_user(target_dirp, arg2, ret);
10304             }
10305             g_free(dirp);
10306         }
10307 #else
10308         {
10309             struct linux_dirent *dirp;
10310             abi_long count = arg3;
10311 
10312             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10313                 goto efault;
10314             ret = get_errno(sys_getdents(arg1, dirp, count));
10315             if (!is_error(ret)) {
10316                 struct linux_dirent *de;
10317                 int len = ret;
10318                 int reclen;
10319                 de = dirp;
10320                 while (len > 0) {
10321                     reclen = de->d_reclen;
10322                     if (reclen > len)
10323                         break;
10324                     de->d_reclen = tswap16(reclen);
10325                     tswapls(&de->d_ino);
10326                     tswapls(&de->d_off);
10327                     de = (struct linux_dirent *)((char *)de + reclen);
10328                     len -= reclen;
10329                 }
10330             }
10331             unlock_user(dirp, arg2, ret);
10332         }
10333 #endif
10334 #else
10335         /* Implement getdents in terms of getdents64 */
10336         {
10337             struct linux_dirent64 *dirp;
10338             abi_long count = arg3;
10339 
10340             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10341             if (!dirp) {
10342                 goto efault;
10343             }
10344             ret = get_errno(sys_getdents64(arg1, dirp, count));
10345             if (!is_error(ret)) {
10346                 /* Convert the dirent64 structs to target dirent.  We do this
10347                  * in-place, since we can guarantee that a target_dirent is no
10348                  * larger than a dirent64; however this means we have to be
10349                  * careful to read everything before writing in the new format.
10350                  */
10351                 struct linux_dirent64 *de;
10352                 struct target_dirent *tde;
10353                 int len = ret;
10354                 int tlen = 0;
10355 
10356                 de = dirp;
10357                 tde = (struct target_dirent *)dirp;
10358                 while (len > 0) {
10359                     int namelen, treclen;
10360                     int reclen = de->d_reclen;
10361                     uint64_t ino = de->d_ino;
10362                     int64_t off = de->d_off;
10363                     uint8_t type = de->d_type;
10364 
10365                     namelen = strlen(de->d_name);
10366                     treclen = offsetof(struct target_dirent, d_name)
10367                         + namelen + 2;
10368                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10369 
10370                     memmove(tde->d_name, de->d_name, namelen + 1);
10371                     tde->d_ino = tswapal(ino);
10372                     tde->d_off = tswapal(off);
10373                     tde->d_reclen = tswap16(treclen);
10374                     /* The target_dirent type is in what was formerly a padding
10375                      * byte at the end of the structure:
10376                      */
10377                     *(((char *)tde) + treclen - 1) = type;
10378 
10379                     de = (struct linux_dirent64 *)((char *)de + reclen);
10380                     tde = (struct target_dirent *)((char *)tde + treclen);
10381                     len -= reclen;
10382                     tlen += treclen;
10383                 }
10384                 ret = tlen;
10385             }
10386             unlock_user(dirp, arg2, ret);
10387         }
10388 #endif
10389         break;
10390 #endif /* TARGET_NR_getdents */
10391 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10392     case TARGET_NR_getdents64:
10393         {
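            /* The target linux_dirent64 layout matches the host's, so the
             * records only need their fields byte-swapped in place. */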
10394             struct linux_dirent64 *dirp;
10395             abi_long count = arg3;
10396             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10397                 goto efault;
10398             ret = get_errno(sys_getdents64(arg1, dirp, count));
10399             if (!is_error(ret)) {
10400                 struct linux_dirent64 *de;
10401                 int len = ret;
10402                 int reclen;
10403                 de = dirp;
10404                 while (len > 0) {
10405                     reclen = de->d_reclen;
10406                     if (reclen > len)
10407                         break;
10408                     de->d_reclen = tswap16(reclen);
10409                     tswap64s((uint64_t *)&de->d_ino);
10410                     tswap64s((uint64_t *)&de->d_off);
10411                     de = (struct linux_dirent64 *)((char *)de + reclen);
10412                     len -= reclen;
10413                 }
10414             }
10415             unlock_user(dirp, arg2, ret);
10416         }
10417         break;
10418 #endif /* TARGET_NR_getdents64 */
10419 #if defined(TARGET_NR__newselect)
10420     case TARGET_NR__newselect:
10421         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10422         break;
10423 #endif
10424 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10425 # ifdef TARGET_NR_poll
10426     case TARGET_NR_poll:
10427 # endif
10428 # ifdef TARGET_NR_ppoll
10429     case TARGET_NR_ppoll:
10430 # endif
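        /* Both poll and ppoll are implemented on top of safe_ppoll(); plain
         * poll just converts its millisecond timeout to a timespec first. */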
10431         {
10432             struct target_pollfd *target_pfd;
10433             unsigned int nfds = arg2;
10434             struct pollfd *pfd;
10435             unsigned int i;
10436 
10437             pfd = NULL;
10438             target_pfd = NULL;
10439             if (nfds) {
10440                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10441                     ret = -TARGET_EINVAL;
10442                     break;
10443                 }
10444 
10445                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10446                                        sizeof(struct target_pollfd) * nfds, 1);
10447                 if (!target_pfd) {
10448                     goto efault;
10449                 }
10450 
10451                 pfd = alloca(sizeof(struct pollfd) * nfds);
10452                 for (i = 0; i < nfds; i++) {
10453                     pfd[i].fd = tswap32(target_pfd[i].fd);
10454                     pfd[i].events = tswap16(target_pfd[i].events);
10455                 }
10456             }
10457 
10458             switch (num) {
10459 # ifdef TARGET_NR_ppoll
10460             case TARGET_NR_ppoll:
10461             {
10462                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10463                 target_sigset_t *target_set;
10464                 sigset_t _set, *set = &_set;
10465 
10466                 if (arg3) {
10467                     if (target_to_host_timespec(timeout_ts, arg3)) {
10468                         unlock_user(target_pfd, arg1, 0);
10469                         goto efault;
10470                     }
10471                 } else {
10472                     timeout_ts = NULL;
10473                 }
10474 
10475                 if (arg4) {
10476                     if (arg5 != sizeof(target_sigset_t)) {
10477                         unlock_user(target_pfd, arg1, 0);
10478                         ret = -TARGET_EINVAL;
10479                         break;
10480                     }
10481 
10482                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10483                     if (!target_set) {
10484                         unlock_user(target_pfd, arg1, 0);
10485                         goto efault;
10486                     }
10487                     target_to_host_sigset(set, target_set);
10488                 } else {
10489                     set = NULL;
10490                 }
10491 
10492                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10493                                            set, SIGSET_T_SIZE));
10494 
10495                 if (!is_error(ret) && arg3) {
10496                     host_to_target_timespec(arg3, timeout_ts);
10497                 }
10498                 if (arg4) {
10499                     unlock_user(target_set, arg4, 0);
10500                 }
10501                 break;
10502             }
10503 # endif
10504 # ifdef TARGET_NR_poll
10505             case TARGET_NR_poll:
10506             {
10507                 struct timespec ts, *pts;
10508 
10509                 if (arg3 >= 0) {
10510                     /* Convert ms to secs, ns */
10511                     ts.tv_sec = arg3 / 1000;
10512                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10513                     pts = &ts;
10514                 } else {
10515                     /* a negative poll() timeout means "infinite" */
10516                     pts = NULL;
10517                 }
10518                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10519                 break;
10520             }
10521 # endif
10522             default:
10523                 g_assert_not_reached();
10524             }
10525 
10526             if (!is_error(ret)) {
10527                 for(i = 0; i < nfds; i++) {
10528                     target_pfd[i].revents = tswap16(pfd[i].revents);
10529                 }
10530             }
10531             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10532         }
10533         break;
10534 #endif
10535     case TARGET_NR_flock:
10536         /* NOTE: the flock constants seem to be the same on every
10537            Linux platform */
10538         ret = get_errno(safe_flock(arg1, arg2));
10539         break;
10540     case TARGET_NR_readv:
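        /* lock_iovec() validates and maps the whole guest iovec; on failure
         * it leaves errno set, which we convert to a target errno below. */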
10541         {
10542             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10543             if (vec != NULL) {
10544                 ret = get_errno(safe_readv(arg1, vec, arg3));
10545                 unlock_iovec(vec, arg2, arg3, 1);
10546             } else {
10547                 ret = -host_to_target_errno(errno);
10548             }
10549         }
10550         break;
10551     case TARGET_NR_writev:
10552         {
10553             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10554             if (vec != NULL) {
10555                 ret = get_errno(safe_writev(arg1, vec, arg3));
10556                 unlock_iovec(vec, arg2, arg3, 0);
10557             } else {
10558                 ret = -host_to_target_errno(errno);
10559             }
10560         }
10561         break;
10562 #if defined(TARGET_NR_preadv)
10563     case TARGET_NR_preadv:
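        /* The 64-bit file offset is split across two guest registers;
         * target_to_host_low_high() rebuilds it in the host's low/high order. */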
10564         {
10565             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10566             if (vec != NULL) {
10567                 unsigned long low, high;
10568 
10569                 target_to_host_low_high(arg4, arg5, &low, &high);
10570                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10571                 unlock_iovec(vec, arg2, arg3, 1);
10572             } else {
10573                 ret = -host_to_target_errno(errno);
10574             }
10575         }
10576         break;
10577 #endif
10578 #if defined(TARGET_NR_pwritev)
10579     case TARGET_NR_pwritev:
10580         {
10581             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10582             if (vec != NULL) {
10583                 unsigned long low, high;
10584 
10585                 target_to_host_low_high(arg4, arg5, &low, &high);
10586                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10587                 unlock_iovec(vec, arg2, arg3, 0);
10588             } else {
10589                 ret = -host_to_target_errno(errno);
10590             }
10591         }
10592         break;
10593 #endif
10594     case TARGET_NR_getsid:
10595         ret = get_errno(getsid(arg1));
10596         break;
10597 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10598     case TARGET_NR_fdatasync:
10599         ret = get_errno(fdatasync(arg1));
10600         break;
10601 #endif
10602 #ifdef TARGET_NR__sysctl
10603     case TARGET_NR__sysctl:
10604         /* We don't implement this, but ENOTDIR is always a safe
10605            return value. */
10606         ret = -TARGET_ENOTDIR;
10607         break;
10608 #endif
10609     case TARGET_NR_sched_getaffinity:
10610         {
10611             unsigned int mask_size;
10612             unsigned long *mask;
10613 
10614             /*
10615              * sched_getaffinity needs multiples of ulong, so we need to take
10616              * care of mismatches between target ulong and host ulong sizes.
10617              */
10618             if (arg2 & (sizeof(abi_ulong) - 1)) {
10619                 ret = -TARGET_EINVAL;
10620                 break;
10621             }
10622             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10623 
10624             mask = alloca(mask_size);
10625             memset(mask, 0, mask_size);
10626             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10627 
10628             if (!is_error(ret)) {
10629                 if (ret > arg2) {
10630                     /* More data returned than the caller's buffer will fit.
10631                      * This only happens if sizeof(abi_long) < sizeof(long)
10632                      * and the caller passed us a buffer holding an odd number
10633                      * of abi_longs. If the host kernel is actually using the
10634                      * extra 4 bytes then fail EINVAL; otherwise we can just
10635                      * ignore them and only copy the interesting part.
10636                      */
10637                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10638                     if (numcpus > arg2 * 8) {
10639                         ret = -TARGET_EINVAL;
10640                         break;
10641                     }
10642                     ret = arg2;
10643                 }
10644 
10645                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10646                     goto efault;
10647                 }
10648             }
10649         }
10650         break;
10651     case TARGET_NR_sched_setaffinity:
10652         {
10653             unsigned int mask_size;
10654             unsigned long *mask;
10655 
10656             /*
10657              * sched_setaffinity needs multiples of ulong, so we need to take
10658              * care of mismatches between target ulong and host ulong sizes.
10659              */
10660             if (arg2 & (sizeof(abi_ulong) - 1)) {
10661                 ret = -TARGET_EINVAL;
10662                 break;
10663             }
10664             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10665             mask = alloca(mask_size);
10666 
10667             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10668             if (ret) {
10669                 break;
10670             }
10671 
10672             ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10673         }
10674         break;
10675     case TARGET_NR_getcpu:
10676         {
10677             unsigned cpu, node;
10678             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10679                                        arg2 ? &node : NULL,
10680                                        NULL));
10681             if (is_error(ret)) {
10682                 goto fail;
10683             }
10684             if (arg1 && put_user_u32(cpu, arg1)) {
10685                 goto efault;
10686             }
10687             if (arg2 && put_user_u32(node, arg2)) {
10688                 goto efault;
10689             }
10690         }
10691         break;
10692     case TARGET_NR_sched_setparam:
10693         {
10694             struct sched_param *target_schp;
10695             struct sched_param schp;
10696 
10697             if (arg2 == 0) {
10698                 return -TARGET_EINVAL;
10699             }
10700             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10701                 goto efault;
10702             schp.sched_priority = tswap32(target_schp->sched_priority);
10703             unlock_user_struct(target_schp, arg2, 0);
10704             ret = get_errno(sched_setparam(arg1, &schp));
10705         }
10706         break;
10707     case TARGET_NR_sched_getparam:
10708         {
10709             struct sched_param *target_schp;
10710             struct sched_param schp;
10711 
10712             if (arg2 == 0) {
10713                 return -TARGET_EINVAL;
10714             }
10715             ret = get_errno(sched_getparam(arg1, &schp));
10716             if (!is_error(ret)) {
10717                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10718                     goto efault;
10719                 target_schp->sched_priority = tswap32(schp.sched_priority);
10720                 unlock_user_struct(target_schp, arg2, 1);
10721             }
10722         }
10723         break;
10724     case TARGET_NR_sched_setscheduler:
10725         {
10726             struct sched_param *target_schp;
10727             struct sched_param schp;
10728             if (arg3 == 0) {
10729                 return -TARGET_EINVAL;
10730             }
10731             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10732                 goto efault;
10733             schp.sched_priority = tswap32(target_schp->sched_priority);
10734             unlock_user_struct(target_schp, arg3, 0);
10735             ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10736         }
10737         break;
10738     case TARGET_NR_sched_getscheduler:
10739         ret = get_errno(sched_getscheduler(arg1));
10740         break;
10741     case TARGET_NR_sched_yield:
10742         ret = get_errno(sched_yield());
10743         break;
10744     case TARGET_NR_sched_get_priority_max:
10745         ret = get_errno(sched_get_priority_max(arg1));
10746         break;
10747     case TARGET_NR_sched_get_priority_min:
10748         ret = get_errno(sched_get_priority_min(arg1));
10749         break;
10750     case TARGET_NR_sched_rr_get_interval:
10751         {
10752             struct timespec ts;
10753             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10754             if (!is_error(ret)) {
10755                 ret = host_to_target_timespec(arg2, &ts);
10756             }
10757         }
10758         break;
10759     case TARGET_NR_nanosleep:
10760         {
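            /* If the sleep is interrupted, report the unslept time back to
             * the guest via arg2. */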
10761             struct timespec req, rem;
10762             target_to_host_timespec(&req, arg1);
10763             ret = get_errno(safe_nanosleep(&req, &rem));
10764             if (is_error(ret) && arg2) {
10765                 host_to_target_timespec(arg2, &rem);
10766             }
10767         }
10768         break;
10769 #ifdef TARGET_NR_query_module
10770     case TARGET_NR_query_module:
10771         goto unimplemented;
10772 #endif
10773 #ifdef TARGET_NR_nfsservctl
10774     case TARGET_NR_nfsservctl:
10775         goto unimplemented;
10776 #endif
10777     case TARGET_NR_prctl:
10778         switch (arg1) {
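        /* prctl options whose arguments point into guest memory need explicit
         * handling here; everything else falls through to the default case. */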
10779         case PR_GET_PDEATHSIG:
10780         {
10781             int deathsig;
10782             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10783             if (!is_error(ret) && arg2
10784                 && put_user_ual(deathsig, arg2)) {
10785                 goto efault;
10786             }
10787             break;
10788         }
10789 #ifdef PR_GET_NAME
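              /* The thread name buffer is a fixed 16 bytes (TASK_COMM_LEN),
               * including the trailing NUL, which is why both PR_GET_NAME and
               * PR_SET_NAME below lock exactly 16 bytes of guest memory.
               */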
10790         case PR_GET_NAME:
10791         {
10792             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10793             if (!name) {
10794                 goto efault;
10795             }
10796             ret = get_errno(prctl(arg1, (unsigned long)name,
10797                                   arg3, arg4, arg5));
10798             unlock_user(name, arg2, 16);
10799             break;
10800         }
10801         case PR_SET_NAME:
10802         {
10803             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10804             if (!name) {
10805                 goto efault;
10806             }
10807             ret = get_errno(prctl(arg1, (unsigned long)name,
10808                                   arg3, arg4, arg5));
10809             unlock_user(name, arg2, 0);
10810             break;
10811         }
10812 #endif
10813 #ifdef TARGET_AARCH64
10814         case TARGET_PR_SVE_SET_VL:
10815             /* We cannot support either PR_SVE_SET_VL_ONEXEC
10816                or PR_SVE_VL_INHERIT.  Therefore, anything above
10817                ARM_MAX_VQ results in EINVAL.  */
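                  /* arg2 is the requested vector length in bytes; ZCR_EL1.LEN
                   * stores (vq - 1), where vq is the length in quadwords,
                   * hence the conversions below.
                   */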
10818             ret = -TARGET_EINVAL;
10819             if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10820                 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10821                 CPUARMState *env = cpu_env;
10822                 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10823                 int vq = MAX(arg2 / 16, 1);
10824 
10825                 if (vq < old_vq) {
10826                     aarch64_sve_narrow_vq(env, vq);
10827                 }
10828                 env->vfp.zcr_el[1] = vq - 1;
10829                 ret = vq * 16;
10830             }
10831             break;
10832         case TARGET_PR_SVE_GET_VL:
10833             ret = -TARGET_EINVAL;
10834             if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10835                 CPUARMState *env = cpu_env;
10836                 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10837             }
10838             break;
10839 #endif /* AARCH64 */
10840         case PR_GET_SECCOMP:
10841         case PR_SET_SECCOMP:
10842             /* Disable seccomp to prevent the target disabling syscalls we
10843              * need. */
10844             ret = -TARGET_EINVAL;
10845             break;
10846         default:
10847             /* Most prctl options have no pointer arguments */
10848             ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10849             break;
10850         }
10851         break;
10852 #ifdef TARGET_NR_arch_prctl
10853     case TARGET_NR_arch_prctl:
10854 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10855         ret = do_arch_prctl(cpu_env, arg1, arg2);
10856         break;
10857 #else
10858         goto unimplemented;
10859 #endif
10860 #endif
10861 #ifdef TARGET_NR_pread64
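          /* Some 32-bit ABIs pass 64-bit syscall arguments in an even/odd
           * register pair; when regpairs_aligned() reports this, the register
           * before the offset is only padding, so shift the argument window
           * down by one before reassembling the 64-bit offset.
           */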
10862     case TARGET_NR_pread64:
10863         if (regpairs_aligned(cpu_env, num)) {
10864             arg4 = arg5;
10865             arg5 = arg6;
10866         }
10867         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10868             goto efault;
10869         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10870         unlock_user(p, arg2, ret);
10871         break;
10872     case TARGET_NR_pwrite64:
10873         if (regpairs_aligned(cpu_env, num)) {
10874             arg4 = arg5;
10875             arg5 = arg6;
10876         }
10877         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10878             goto efault;
10879         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10880         unlock_user(p, arg2, 0);
10881         break;
10882 #endif
10883     case TARGET_NR_getcwd:
10884         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10885             goto efault;
10886         ret = get_errno(sys_getcwd1(p, arg2));
10887         unlock_user(p, arg1, ret);
10888         break;
10889     case TARGET_NR_capget:
10890     case TARGET_NR_capset:
10891     {
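              /* The capability syscalls take a header that selects the ABI
               * version: version 1 uses a single cap_data struct, while later
               * versions use an array of two.  The kernel writes the version
               * it supports back into the header, so the header is always
               * copied back out below.
               */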
10892         struct target_user_cap_header *target_header;
10893         struct target_user_cap_data *target_data = NULL;
10894         struct __user_cap_header_struct header;
10895         struct __user_cap_data_struct data[2];
10896         struct __user_cap_data_struct *dataptr = NULL;
10897         int i, target_datalen;
10898         int data_items = 1;
10899 
10900         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10901             goto efault;
10902         }
10903         header.version = tswap32(target_header->version);
10904         header.pid = tswap32(target_header->pid);
10905 
10906         if (header.version != _LINUX_CAPABILITY_VERSION) {
10907             /* Versions 2 and up take a pointer to two user_data structs */
10908             data_items = 2;
10909         }
10910 
10911         target_datalen = sizeof(*target_data) * data_items;
10912 
10913         if (arg2) {
10914             if (num == TARGET_NR_capget) {
10915                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10916             } else {
10917                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10918             }
10919             if (!target_data) {
10920                 unlock_user_struct(target_header, arg1, 0);
10921                 goto efault;
10922             }
10923 
10924             if (num == TARGET_NR_capset) {
10925                 for (i = 0; i < data_items; i++) {
10926                     data[i].effective = tswap32(target_data[i].effective);
10927                     data[i].permitted = tswap32(target_data[i].permitted);
10928                     data[i].inheritable = tswap32(target_data[i].inheritable);
10929                 }
10930             }
10931 
10932             dataptr = data;
10933         }
10934 
10935         if (num == TARGET_NR_capget) {
10936             ret = get_errno(capget(&header, dataptr));
10937         } else {
10938             ret = get_errno(capset(&header, dataptr));
10939         }
10940 
10941         /* The kernel always updates version for both capget and capset */
10942         target_header->version = tswap32(header.version);
10943         unlock_user_struct(target_header, arg1, 1);
10944 
10945         if (arg2) {
10946             if (num == TARGET_NR_capget) {
10947                 for (i = 0; i < data_items; i++) {
10948                     target_data[i].effective = tswap32(data[i].effective);
10949                     target_data[i].permitted = tswap32(data[i].permitted);
10950                     target_data[i].inheritable = tswap32(data[i].inheritable);
10951                 }
10952                 unlock_user(target_data, arg2, target_datalen);
10953             } else {
10954                 unlock_user(target_data, arg2, 0);
10955             }
10956         }
10957         break;
10958     }
10959     case TARGET_NR_sigaltstack:
10960         ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10961         break;
10962 
10963 #ifdef CONFIG_SENDFILE
10964     case TARGET_NR_sendfile:
10965     {
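              /* sendfile() advances the offset it is given, so copy the
               * target's offset in first and write the updated value back on
               * success.
               */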
10966         off_t *offp = NULL;
10967         off_t off;
10968         if (arg3) {
10969             ret = get_user_sal(off, arg3);
10970             if (is_error(ret)) {
10971                 break;
10972             }
10973             offp = &off;
10974         }
10975         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10976         if (!is_error(ret) && arg3) {
10977             abi_long ret2 = put_user_sal(off, arg3);
10978             if (is_error(ret2)) {
10979                 ret = ret2;
10980             }
10981         }
10982         break;
10983     }
10984 #ifdef TARGET_NR_sendfile64
10985     case TARGET_NR_sendfile64:
10986     {
10987         off_t *offp = NULL;
10988         off_t off;
10989         if (arg3) {
10990             ret = get_user_s64(off, arg3);
10991             if (is_error(ret)) {
10992                 break;
10993             }
10994             offp = &off;
10995         }
10996         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10997         if (!is_error(ret) && arg3) {
10998             abi_long ret2 = put_user_s64(off, arg3);
10999             if (is_error(ret2)) {
11000                 ret = ret2;
11001             }
11002         }
11003         break;
11004     }
11005 #endif
11006 #else
11007     case TARGET_NR_sendfile:
11008 #ifdef TARGET_NR_sendfile64
11009     case TARGET_NR_sendfile64:
11010 #endif
11011         goto unimplemented;
11012 #endif
11013 
11014 #ifdef TARGET_NR_getpmsg
11015     case TARGET_NR_getpmsg:
11016         goto unimplemented;
11017 #endif
11018 #ifdef TARGET_NR_putpmsg
11019     case TARGET_NR_putpmsg:
11020         goto unimplemented;
11021 #endif
11022 #ifdef TARGET_NR_vfork
11023     case TARGET_NR_vfork:
11024         ret = get_errno(do_fork(cpu_env,
11025                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11026                         0, 0, 0, 0));
11027         break;
11028 #endif
11029 #ifdef TARGET_NR_ugetrlimit
11030     case TARGET_NR_ugetrlimit:
11031     {
11032         struct rlimit rlim;
11033         int resource = target_to_host_resource(arg1);
11034         ret = get_errno(getrlimit(resource, &rlim));
11035         if (!is_error(ret)) {
11036             struct target_rlimit *target_rlim;
11037             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11038                 goto efault;
11039             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11040             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11041             unlock_user_struct(target_rlim, arg2, 1);
11042         }
11043         break;
11044     }
11045 #endif
11046 #ifdef TARGET_NR_truncate64
11047     case TARGET_NR_truncate64:
11048         if (!(p = lock_user_string(arg1)))
11049             goto efault;
11050         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11051         unlock_user(p, arg1, 0);
11052         break;
11053 #endif
11054 #ifdef TARGET_NR_ftruncate64
11055     case TARGET_NR_ftruncate64:
11056         ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11057         break;
11058 #endif
11059 #ifdef TARGET_NR_stat64
11060     case TARGET_NR_stat64:
11061         if (!(p = lock_user_string(arg1)))
11062             goto efault;
11063         ret = get_errno(stat(path(p), &st));
11064         unlock_user(p, arg1, 0);
11065         if (!is_error(ret))
11066             ret = host_to_target_stat64(cpu_env, arg2, &st);
11067         break;
11068 #endif
11069 #ifdef TARGET_NR_lstat64
11070     case TARGET_NR_lstat64:
11071         if (!(p = lock_user_string(arg1)))
11072             goto efault;
11073         ret = get_errno(lstat(path(p), &st));
11074         unlock_user(p, arg1, 0);
11075         if (!is_error(ret))
11076             ret = host_to_target_stat64(cpu_env, arg2, &st);
11077         break;
11078 #endif
11079 #ifdef TARGET_NR_fstat64
11080     case TARGET_NR_fstat64:
11081         ret = get_errno(fstat(arg1, &st));
11082         if (!is_error(ret))
11083             ret = host_to_target_stat64(cpu_env, arg2, &st);
11084         break;
11085 #endif
11086 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11087 #ifdef TARGET_NR_fstatat64
11088     case TARGET_NR_fstatat64:
11089 #endif
11090 #ifdef TARGET_NR_newfstatat
11091     case TARGET_NR_newfstatat:
11092 #endif
11093         if (!(p = lock_user_string(arg2)))
11094             goto efault;
11095         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11096         if (!is_error(ret))
11097             ret = host_to_target_stat64(cpu_env, arg3, &st);
11098         break;
11099 #endif
11100 #ifdef TARGET_NR_lchown
11101     case TARGET_NR_lchown:
11102         if (!(p = lock_user_string(arg1)))
11103             goto efault;
11104         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11105         unlock_user(p, arg1, 0);
11106         break;
11107 #endif
11108 #ifdef TARGET_NR_getuid
11109     case TARGET_NR_getuid:
11110         ret = get_errno(high2lowuid(getuid()));
11111         break;
11112 #endif
11113 #ifdef TARGET_NR_getgid
11114     case TARGET_NR_getgid:
11115         ret = get_errno(high2lowgid(getgid()));
11116         break;
11117 #endif
11118 #ifdef TARGET_NR_geteuid
11119     case TARGET_NR_geteuid:
11120         ret = get_errno(high2lowuid(geteuid()));
11121         break;
11122 #endif
11123 #ifdef TARGET_NR_getegid
11124     case TARGET_NR_getegid:
11125         ret = get_errno(high2lowgid(getegid()));
11126         break;
11127 #endif
11128     case TARGET_NR_setreuid:
11129         ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11130         break;
11131     case TARGET_NR_setregid:
11132         ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11133         break;
11134     case TARGET_NR_getgroups:
11135         {
11136             int gidsetsize = arg1;
11137             target_id *target_grouplist;
11138             gid_t *grouplist;
11139             int i;
11140 
11141             grouplist = alloca(gidsetsize * sizeof(gid_t));
11142             ret = get_errno(getgroups(gidsetsize, grouplist));
11143             if (gidsetsize == 0)
11144                 break;
11145             if (!is_error(ret)) {
11146                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11147                 if (!target_grouplist)
11148                     goto efault;
11149                 for (i = 0; i < ret; i++)
11150                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11151                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11152             }
11153         }
11154         break;
11155     case TARGET_NR_setgroups:
11156         {
11157             int gidsetsize = arg1;
11158             target_id *target_grouplist;
11159             gid_t *grouplist = NULL;
11160             int i;
11161             if (gidsetsize) {
11162                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11163                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11164                 if (!target_grouplist) {
11165                     ret = -TARGET_EFAULT;
11166                     goto fail;
11167                 }
11168                 for (i = 0; i < gidsetsize; i++) {
11169                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11170                 }
11171                 unlock_user(target_grouplist, arg2, 0);
11172             }
11173             ret = get_errno(setgroups(gidsetsize, grouplist));
11174         }
11175         break;
11176     case TARGET_NR_fchown:
11177         ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11178         break;
11179 #if defined(TARGET_NR_fchownat)
11180     case TARGET_NR_fchownat:
11181         if (!(p = lock_user_string(arg2)))
11182             goto efault;
11183         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11184                                  low2highgid(arg4), arg5));
11185         unlock_user(p, arg2, 0);
11186         break;
11187 #endif
11188 #ifdef TARGET_NR_setresuid
11189     case TARGET_NR_setresuid:
11190         ret = get_errno(sys_setresuid(low2highuid(arg1),
11191                                       low2highuid(arg2),
11192                                       low2highuid(arg3)));
11193         break;
11194 #endif
11195 #ifdef TARGET_NR_getresuid
11196     case TARGET_NR_getresuid:
11197         {
11198             uid_t ruid, euid, suid;
11199             ret = get_errno(getresuid(&ruid, &euid, &suid));
11200             if (!is_error(ret)) {
11201                 if (put_user_id(high2lowuid(ruid), arg1)
11202                     || put_user_id(high2lowuid(euid), arg2)
11203                     || put_user_id(high2lowuid(suid), arg3))
11204                     goto efault;
11205             }
11206         }
11207         break;
11208 #endif
11209 #ifdef TARGET_NR_getresgid
11210     case TARGET_NR_setresgid:
11211         ret = get_errno(sys_setresgid(low2highgid(arg1),
11212                                       low2highgid(arg2),
11213                                       low2highgid(arg3)));
11214         break;
11215 #endif
11216 #ifdef TARGET_NR_getresgid
11217     case TARGET_NR_getresgid:
11218         {
11219             gid_t rgid, egid, sgid;
11220             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11221             if (!is_error(ret)) {
11222                 if (put_user_id(high2lowgid(rgid), arg1)
11223                     || put_user_id(high2lowgid(egid), arg2)
11224                     || put_user_id(high2lowgid(sgid), arg3))
11225                     goto efault;
11226             }
11227         }
11228         break;
11229 #endif
11230 #ifdef TARGET_NR_chown
11231     case TARGET_NR_chown:
11232         if (!(p = lock_user_string(arg1)))
11233             goto efault;
11234         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11235         unlock_user(p, arg1, 0);
11236         break;
11237 #endif
11238     case TARGET_NR_setuid:
11239         ret = get_errno(sys_setuid(low2highuid(arg1)));
11240         break;
11241     case TARGET_NR_setgid:
11242         ret = get_errno(sys_setgid(low2highgid(arg1)));
11243         break;
11244     case TARGET_NR_setfsuid:
11245         ret = get_errno(setfsuid(arg1));
11246         break;
11247     case TARGET_NR_setfsgid:
11248         ret = get_errno(setfsgid(arg1));
11249         break;
11250 
11251 #ifdef TARGET_NR_lchown32
11252     case TARGET_NR_lchown32:
11253         if (!(p = lock_user_string(arg1)))
11254             goto efault;
11255         ret = get_errno(lchown(p, arg2, arg3));
11256         unlock_user(p, arg1, 0);
11257         break;
11258 #endif
11259 #ifdef TARGET_NR_getuid32
11260     case TARGET_NR_getuid32:
11261         ret = get_errno(getuid());
11262         break;
11263 #endif
11264 
11265 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11266     /* Alpha specific */
11267     case TARGET_NR_getxuid:
11268         {
11269             uid_t euid;
11270             euid = geteuid();
11271             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11272         }
11273         ret = get_errno(getuid());
11274         break;
11275 #endif
11276 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11277     /* Alpha specific */
11278     case TARGET_NR_getxgid:
11279         {
11280             gid_t egid;
11281             egid = getegid();
11282             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11283         }
11284         ret = get_errno(getgid());
11285         break;
11286 #endif
11287 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11288     /* Alpha specific */
11289     case TARGET_NR_osf_getsysinfo:
11290         ret = -TARGET_EOPNOTSUPP;
11291         switch (arg1) {
11292           case TARGET_GSI_IEEE_FP_CONTROL:
11293             {
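                      /* Translate the hardware FPCR bits into the OSF/1-style
                       * software floating-point control word (SWCR) layout
                       * that guest userspace expects.
                       */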
11294                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11295 
11296                 /* Copied from linux ieee_fpcr_to_swcr.  */
11297                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11298                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11299                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11300                                         | SWCR_TRAP_ENABLE_DZE
11301                                         | SWCR_TRAP_ENABLE_OVF);
11302                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11303                                         | SWCR_TRAP_ENABLE_INE);
11304                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11305                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11306 
11307                 if (put_user_u64 (swcr, arg2))
11308                         goto efault;
11309                 ret = 0;
11310             }
11311             break;
11312 
11313           /* case GSI_IEEE_STATE_AT_SIGNAL:
11314              -- Not implemented in linux kernel.
11315              case GSI_UACPROC:
11316              -- Retrieves current unaligned access state; not much used.
11317              case GSI_PROC_TYPE:
11318              -- Retrieves implver information; surely not used.
11319              case GSI_GET_HWRPB:
11320              -- Grabs a copy of the HWRPB; surely not used.
11321           */
11322         }
11323         break;
11324 #endif
11325 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11326     /* Alpha specific */
11327     case TARGET_NR_osf_setsysinfo:
11328         ret = -TARGET_EOPNOTSUPP;
11329         switch (arg1) {
11330           case TARGET_SSI_IEEE_FP_CONTROL:
11331             {
11332                 uint64_t swcr, fpcr, orig_fpcr;
11333 
11334                 if (get_user_u64 (swcr, arg2)) {
11335                     goto efault;
11336                 }
11337                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11338                 fpcr = orig_fpcr & FPCR_DYN_MASK;
11339 
11340                 /* Copied from linux ieee_swcr_to_fpcr.  */
11341                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11342                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11343                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11344                                   | SWCR_TRAP_ENABLE_DZE
11345                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
11346                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11347                                   | SWCR_TRAP_ENABLE_INE)) << 57;
11348                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11349                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11350 
11351                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11352                 ret = 0;
11353             }
11354             break;
11355 
11356           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11357             {
11358                 uint64_t exc, fpcr, orig_fpcr;
11359                 int si_code;
11360 
11361                 if (get_user_u64(exc, arg2)) {
11362                     goto efault;
11363                 }
11364 
11365                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11366 
11367                 /* We only add to the exception status here.  */
11368                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11369 
11370                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11371                 ret = 0;
11372 
11373                 /* Old exceptions are not signaled.  */
11374                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11375 
11376                 /* If any exceptions were set by this call
11377                    and are unmasked, send a signal.  */
11378                 si_code = 0;
11379                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11380                     si_code = TARGET_FPE_FLTRES;
11381                 }
11382                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11383                     si_code = TARGET_FPE_FLTUND;
11384                 }
11385                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11386                     si_code = TARGET_FPE_FLTOVF;
11387                 }
11388                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11389                     si_code = TARGET_FPE_FLTDIV;
11390                 }
11391                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11392                     si_code = TARGET_FPE_FLTINV;
11393                 }
11394                 if (si_code != 0) {
11395                     target_siginfo_t info;
11396                     info.si_signo = SIGFPE;
11397                     info.si_errno = 0;
11398                     info.si_code = si_code;
11399                     info._sifields._sigfault._addr
11400                         = ((CPUArchState *)cpu_env)->pc;
11401                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11402                                  QEMU_SI_FAULT, &info);
11403                 }
11404             }
11405             break;
11406 
11407           /* case SSI_NVPAIRS:
11408              -- Used with SSIN_UACPROC to enable unaligned accesses.
11409              case SSI_IEEE_STATE_AT_SIGNAL:
11410              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11411              -- Not implemented in linux kernel
11412           */
11413         }
11414         break;
11415 #endif
11416 #ifdef TARGET_NR_osf_sigprocmask
11417     /* Alpha specific.  */
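          /* Unlike the generic sigprocmask, the OSF/1 variant returns the old
           * signal mask as the syscall result instead of through a pointer.
           */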
11418     case TARGET_NR_osf_sigprocmask:
11419         {
11420             abi_ulong mask;
11421             int how;
11422             sigset_t set, oldset;
11423 
11424             switch (arg1) {
11425             case TARGET_SIG_BLOCK:
11426                 how = SIG_BLOCK;
11427                 break;
11428             case TARGET_SIG_UNBLOCK:
11429                 how = SIG_UNBLOCK;
11430                 break;
11431             case TARGET_SIG_SETMASK:
11432                 how = SIG_SETMASK;
11433                 break;
11434             default:
11435                 ret = -TARGET_EINVAL;
11436                 goto fail;
11437             }
11438             mask = arg2;
11439             target_to_host_old_sigset(&set, &mask);
11440             ret = do_sigprocmask(how, &set, &oldset);
11441             if (!ret) {
11442                 host_to_target_old_sigset(&mask, &oldset);
11443                 ret = mask;
11444             }
11445         }
11446         break;
11447 #endif
11448 
11449 #ifdef TARGET_NR_getgid32
11450     case TARGET_NR_getgid32:
11451         ret = get_errno(getgid());
11452         break;
11453 #endif
11454 #ifdef TARGET_NR_geteuid32
11455     case TARGET_NR_geteuid32:
11456         ret = get_errno(geteuid());
11457         break;
11458 #endif
11459 #ifdef TARGET_NR_getegid32
11460     case TARGET_NR_getegid32:
11461         ret = get_errno(getegid());
11462         break;
11463 #endif
11464 #ifdef TARGET_NR_setreuid32
11465     case TARGET_NR_setreuid32:
11466         ret = get_errno(setreuid(arg1, arg2));
11467         break;
11468 #endif
11469 #ifdef TARGET_NR_setregid32
11470     case TARGET_NR_setregid32:
11471         ret = get_errno(setregid(arg1, arg2));
11472         break;
11473 #endif
11474 #ifdef TARGET_NR_getgroups32
11475     case TARGET_NR_getgroups32:
11476         {
11477             int gidsetsize = arg1;
11478             uint32_t *target_grouplist;
11479             gid_t *grouplist;
11480             int i;
11481 
11482             grouplist = alloca(gidsetsize * sizeof(gid_t));
11483             ret = get_errno(getgroups(gidsetsize, grouplist));
11484             if (gidsetsize == 0)
11485                 break;
11486             if (!is_error(ret)) {
11487                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11488                 if (!target_grouplist) {
11489                     ret = -TARGET_EFAULT;
11490                     goto fail;
11491                 }
11492                 for (i = 0; i < ret; i++)
11493                     target_grouplist[i] = tswap32(grouplist[i]);
11494                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11495             }
11496         }
11497         break;
11498 #endif
11499 #ifdef TARGET_NR_setgroups32
11500     case TARGET_NR_setgroups32:
11501         {
11502             int gidsetsize = arg1;
11503             uint32_t *target_grouplist;
11504             gid_t *grouplist;
11505             int i;
11506 
11507             grouplist = alloca(gidsetsize * sizeof(gid_t));
11508             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11509             if (!target_grouplist) {
11510                 ret = -TARGET_EFAULT;
11511                 goto fail;
11512             }
11513             for (i = 0; i < gidsetsize; i++)
11514                 grouplist[i] = tswap32(target_grouplist[i]);
11515             unlock_user(target_grouplist, arg2, 0);
11516             ret = get_errno(setgroups(gidsetsize, grouplist));
11517         }
11518         break;
11519 #endif
11520 #ifdef TARGET_NR_fchown32
11521     case TARGET_NR_fchown32:
11522         ret = get_errno(fchown(arg1, arg2, arg3));
11523         break;
11524 #endif
11525 #ifdef TARGET_NR_setresuid32
11526     case TARGET_NR_setresuid32:
11527         ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11528         break;
11529 #endif
11530 #ifdef TARGET_NR_getresuid32
11531     case TARGET_NR_getresuid32:
11532         {
11533             uid_t ruid, euid, suid;
11534             ret = get_errno(getresuid(&ruid, &euid, &suid));
11535             if (!is_error(ret)) {
11536                 if (put_user_u32(ruid, arg1)
11537                     || put_user_u32(euid, arg2)
11538                     || put_user_u32(suid, arg3))
11539                     goto efault;
11540             }
11541         }
11542         break;
11543 #endif
11544 #ifdef TARGET_NR_setresgid32
11545     case TARGET_NR_setresgid32:
11546         ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11547         break;
11548 #endif
11549 #ifdef TARGET_NR_getresgid32
11550     case TARGET_NR_getresgid32:
11551         {
11552             gid_t rgid, egid, sgid;
11553             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11554             if (!is_error(ret)) {
11555                 if (put_user_u32(rgid, arg1)
11556                     || put_user_u32(egid, arg2)
11557                     || put_user_u32(sgid, arg3))
11558                     goto efault;
11559             }
11560         }
11561         break;
11562 #endif
11563 #ifdef TARGET_NR_chown32
11564     case TARGET_NR_chown32:
11565         if (!(p = lock_user_string(arg1)))
11566             goto efault;
11567         ret = get_errno(chown(p, arg2, arg3));
11568         unlock_user(p, arg1, 0);
11569         break;
11570 #endif
11571 #ifdef TARGET_NR_setuid32
11572     case TARGET_NR_setuid32:
11573         ret = get_errno(sys_setuid(arg1));
11574         break;
11575 #endif
11576 #ifdef TARGET_NR_setgid32
11577     case TARGET_NR_setgid32:
11578         ret = get_errno(sys_setgid(arg1));
11579         break;
11580 #endif
11581 #ifdef TARGET_NR_setfsuid32
11582     case TARGET_NR_setfsuid32:
11583         ret = get_errno(setfsuid(arg1));
11584         break;
11585 #endif
11586 #ifdef TARGET_NR_setfsgid32
11587     case TARGET_NR_setfsgid32:
11588         ret = get_errno(setfsgid(arg1));
11589         break;
11590 #endif
11591 
11592     case TARGET_NR_pivot_root:
11593         goto unimplemented;
11594 #ifdef TARGET_NR_mincore
11595     case TARGET_NR_mincore:
11596         {
11597             void *a;
11598             ret = -TARGET_ENOMEM;
11599             a = lock_user(VERIFY_READ, arg1, arg2, 0);
11600             if (!a) {
11601                 goto fail;
11602             }
11603             ret = -TARGET_EFAULT;
11604             p = lock_user_string(arg3);
11605             if (!p) {
11606                 goto mincore_fail;
11607             }
11608             ret = get_errno(mincore(a, arg2, p));
11609             unlock_user(p, arg3, ret);
11610             mincore_fail:
11611             unlock_user(a, arg1, 0);
11612         }
11613         break;
11614 #endif
11615 #ifdef TARGET_NR_arm_fadvise64_64
11616     case TARGET_NR_arm_fadvise64_64:
11617         /* arm_fadvise64_64 looks like fadvise64_64 but
11618          * with different argument order: fd, advice, offset, len
11619          * rather than the usual fd, offset, len, advice.
11620          * Note that offset and len are both 64-bit so appear as
11621          * pairs of 32-bit registers.
11622          */
11623         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11624                             target_offset64(arg5, arg6), arg2);
11625         ret = -host_to_target_errno(ret);
11626         break;
11627 #endif
11628 
11629 #if TARGET_ABI_BITS == 32
11630 
11631 #ifdef TARGET_NR_fadvise64_64
11632     case TARGET_NR_fadvise64_64:
11633 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11634         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11635         ret = arg2;
11636         arg2 = arg3;
11637         arg3 = arg4;
11638         arg4 = arg5;
11639         arg5 = arg6;
11640         arg6 = ret;
11641 #else
11642         /* 6 args: fd, offset (high, low), len (high, low), advice */
11643         if (regpairs_aligned(cpu_env, num)) {
11644             /* offset is in (3,4), len in (5,6) and advice in 7 */
11645             arg2 = arg3;
11646             arg3 = arg4;
11647             arg4 = arg5;
11648             arg5 = arg6;
11649             arg6 = arg7;
11650         }
11651 #endif
11652         ret = -host_to_target_errno(posix_fadvise(arg1,
11653                                                   target_offset64(arg2, arg3),
11654                                                   target_offset64(arg4, arg5),
11655                                                   arg6));
11656         break;
11657 #endif
11658 
11659 #ifdef TARGET_NR_fadvise64
11660     case TARGET_NR_fadvise64:
11661         /* 5 args: fd, offset (high, low), len, advice */
11662         if (regpairs_aligned(cpu_env, num)) {
11663             /* offset is in (3,4), len in 5 and advice in 6 */
11664             arg2 = arg3;
11665             arg3 = arg4;
11666             arg4 = arg5;
11667             arg5 = arg6;
11668         }
11669         ret = -host_to_target_errno(posix_fadvise(arg1,
11670                                                   target_offset64(arg2, arg3),
11671                                                   arg4, arg5));
11672         break;
11673 #endif
11674 
11675 #else /* not a 32-bit ABI */
11676 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11677 #ifdef TARGET_NR_fadvise64_64
11678     case TARGET_NR_fadvise64_64:
11679 #endif
11680 #ifdef TARGET_NR_fadvise64
11681     case TARGET_NR_fadvise64:
11682 #endif
11683 #ifdef TARGET_S390X
11684         switch (arg4) {
11685         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11686         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11687         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11688         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11689         default: break;
11690         }
11691 #endif
11692         ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11693         break;
11694 #endif
11695 #endif /* end of 64-bit ABI fadvise handling */
11696 
11697 #ifdef TARGET_NR_madvise
11698     case TARGET_NR_madvise:
11699         /* A straight passthrough may not be safe because qemu sometimes
11700            turns private file-backed mappings into anonymous mappings.
11701            This will break MADV_DONTNEED.
11702            This is a hint, so ignoring and returning success is ok.  */
11703         ret = get_errno(0);
11704         break;
11705 #endif
11706 #if TARGET_ABI_BITS == 32
11707     case TARGET_NR_fcntl64:
11708     {
11709         int cmd;
11710         struct flock64 fl;
11711         from_flock64_fn *copyfrom = copy_from_user_flock64;
11712         to_flock64_fn *copyto = copy_to_user_flock64;
11713 
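              /* The ARM OABI lays out struct flock64 with different padding
               * from EABI, so old-ABI guests need the dedicated OABI copy
               * helpers.
               */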
11714 #ifdef TARGET_ARM
11715         if (!((CPUARMState *)cpu_env)->eabi) {
11716             copyfrom = copy_from_user_oabi_flock64;
11717             copyto = copy_to_user_oabi_flock64;
11718         }
11719 #endif
11720 
11721         cmd = target_to_host_fcntl_cmd(arg2);
11722         if (cmd == -TARGET_EINVAL) {
11723             ret = cmd;
11724             break;
11725         }
11726 
11727         switch (arg2) {
11728         case TARGET_F_GETLK64:
11729             ret = copyfrom(&fl, arg3);
11730             if (ret) {
11731                 break;
11732             }
11733             ret = get_errno(fcntl(arg1, cmd, &fl));
11734             if (ret == 0) {
11735                 ret = copyto(arg3, &fl);
11736             }
11737             break;
11738 
11739         case TARGET_F_SETLK64:
11740         case TARGET_F_SETLKW64:
11741             ret = copyfrom(&fl, arg3);
11742             if (ret) {
11743                 break;
11744             }
11745             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11746             break;
11747         default:
11748             ret = do_fcntl(arg1, arg2, arg3);
11749             break;
11750         }
11751         break;
11752     }
11753 #endif
11754 #ifdef TARGET_NR_cacheflush
11755     case TARGET_NR_cacheflush:
11756         /* self-modifying code is handled automatically, so nothing needed */
11757         ret = 0;
11758         break;
11759 #endif
11760 #ifdef TARGET_NR_security
11761     case TARGET_NR_security:
11762         goto unimplemented;
11763 #endif
11764 #ifdef TARGET_NR_getpagesize
11765     case TARGET_NR_getpagesize:
11766         ret = TARGET_PAGE_SIZE;
11767         break;
11768 #endif
11769     case TARGET_NR_gettid:
11770         ret = get_errno(gettid());
11771         break;
11772 #ifdef TARGET_NR_readahead
11773     case TARGET_NR_readahead:
11774 #if TARGET_ABI_BITS == 32
11775         if (regpairs_aligned(cpu_env, num)) {
11776             arg2 = arg3;
11777             arg3 = arg4;
11778             arg4 = arg5;
11779         }
11780         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11781 #else
11782         ret = get_errno(readahead(arg1, arg2, arg3));
11783 #endif
11784         break;
11785 #endif
11786 #ifdef CONFIG_ATTR
11787 #ifdef TARGET_NR_setxattr
11788     case TARGET_NR_listxattr:
11789     case TARGET_NR_llistxattr:
11790     {
11791         void *p, *b = 0;
11792         if (arg2) {
11793             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11794             if (!b) {
11795                 ret = -TARGET_EFAULT;
11796                 break;
11797             }
11798         }
11799         p = lock_user_string(arg1);
11800         if (p) {
11801             if (num == TARGET_NR_listxattr) {
11802                 ret = get_errno(listxattr(p, b, arg3));
11803             } else {
11804                 ret = get_errno(llistxattr(p, b, arg3));
11805             }
11806         } else {
11807             ret = -TARGET_EFAULT;
11808         }
11809         unlock_user(p, arg1, 0);
11810         unlock_user(b, arg2, arg3);
11811         break;
11812     }
11813     case TARGET_NR_flistxattr:
11814     {
11815         void *b = 0;
11816         if (arg2) {
11817             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11818             if (!b) {
11819                 ret = -TARGET_EFAULT;
11820                 break;
11821             }
11822         }
11823         ret = get_errno(flistxattr(arg1, b, arg3));
11824         unlock_user(b, arg2, arg3);
11825         break;
11826     }
11827     case TARGET_NR_setxattr:
11828     case TARGET_NR_lsetxattr:
11829         {
11830             void *p, *n, *v = 0;
11831             if (arg3) {
11832                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11833                 if (!v) {
11834                     ret = -TARGET_EFAULT;
11835                     break;
11836                 }
11837             }
11838             p = lock_user_string(arg1);
11839             n = lock_user_string(arg2);
11840             if (p && n) {
11841                 if (num == TARGET_NR_setxattr) {
11842                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11843                 } else {
11844                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11845                 }
11846             } else {
11847                 ret = -TARGET_EFAULT;
11848             }
11849             unlock_user(p, arg1, 0);
11850             unlock_user(n, arg2, 0);
11851             unlock_user(v, arg3, 0);
11852         }
11853         break;
11854     case TARGET_NR_fsetxattr:
11855         {
11856             void *n, *v = 0;
11857             if (arg3) {
11858                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11859                 if (!v) {
11860                     ret = -TARGET_EFAULT;
11861                     break;
11862                 }
11863             }
11864             n = lock_user_string(arg2);
11865             if (n) {
11866                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11867             } else {
11868                 ret = -TARGET_EFAULT;
11869             }
11870             unlock_user(n, arg2, 0);
11871             unlock_user(v, arg3, 0);
11872         }
11873         break;
11874     case TARGET_NR_getxattr:
11875     case TARGET_NR_lgetxattr:
11876         {
11877             void *p, *n, *v = 0;
11878             if (arg3) {
11879                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11880                 if (!v) {
11881                     ret = -TARGET_EFAULT;
11882                     break;
11883                 }
11884             }
11885             p = lock_user_string(arg1);
11886             n = lock_user_string(arg2);
11887             if (p && n) {
11888                 if (num == TARGET_NR_getxattr) {
11889                     ret = get_errno(getxattr(p, n, v, arg4));
11890                 } else {
11891                     ret = get_errno(lgetxattr(p, n, v, arg4));
11892                 }
11893             } else {
11894                 ret = -TARGET_EFAULT;
11895             }
11896             unlock_user(p, arg1, 0);
11897             unlock_user(n, arg2, 0);
11898             unlock_user(v, arg3, arg4);
11899         }
11900         break;
11901     case TARGET_NR_fgetxattr:
11902         {
11903             void *n, *v = 0;
11904             if (arg3) {
11905                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11906                 if (!v) {
11907                     ret = -TARGET_EFAULT;
11908                     break;
11909                 }
11910             }
11911             n = lock_user_string(arg2);
11912             if (n) {
11913                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11914             } else {
11915                 ret = -TARGET_EFAULT;
11916             }
11917             unlock_user(n, arg2, 0);
11918             unlock_user(v, arg3, arg4);
11919         }
11920         break;
11921     case TARGET_NR_removexattr:
11922     case TARGET_NR_lremovexattr:
11923         {
11924             void *p, *n;
11925             p = lock_user_string(arg1);
11926             n = lock_user_string(arg2);
11927             if (p && n) {
11928                 if (num == TARGET_NR_removexattr) {
11929                     ret = get_errno(removexattr(p, n));
11930                 } else {
11931                     ret = get_errno(lremovexattr(p, n));
11932                 }
11933             } else {
11934                 ret = -TARGET_EFAULT;
11935             }
11936             unlock_user(p, arg1, 0);
11937             unlock_user(n, arg2, 0);
11938         }
11939         break;
11940     case TARGET_NR_fremovexattr:
11941         {
11942             void *n;
11943             n = lock_user_string(arg2);
11944             if (n) {
11945                 ret = get_errno(fremovexattr(arg1, n));
11946             } else {
11947                 ret = -TARGET_EFAULT;
11948             }
11949             unlock_user(n, arg2, 0);
11950         }
11951         break;
11952 #endif
11953 #endif /* CONFIG_ATTR */
11954 #ifdef TARGET_NR_set_thread_area
11955     case TARGET_NR_set_thread_area:
11956 #if defined(TARGET_MIPS)
11957       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11958       ret = 0;
11959       break;
11960 #elif defined(TARGET_CRIS)
11961       if (arg1 & 0xff)
11962           ret = -TARGET_EINVAL;
11963       else {
11964           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11965           ret = 0;
11966       }
11967       break;
11968 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11969       ret = do_set_thread_area(cpu_env, arg1);
11970       break;
11971 #elif defined(TARGET_M68K)
11972       {
11973           TaskState *ts = cpu->opaque;
11974           ts->tp_value = arg1;
11975           ret = 0;
11976           break;
11977       }
11978 #else
11979       goto unimplemented_nowarn;
11980 #endif
11981 #endif
11982 #ifdef TARGET_NR_get_thread_area
11983     case TARGET_NR_get_thread_area:
11984 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11985         ret = do_get_thread_area(cpu_env, arg1);
11986         break;
11987 #elif defined(TARGET_M68K)
11988         {
11989             TaskState *ts = cpu->opaque;
11990             ret = ts->tp_value;
11991             break;
11992         }
11993 #else
11994         goto unimplemented_nowarn;
11995 #endif
11996 #endif
11997 #ifdef TARGET_NR_getdomainname
11998     case TARGET_NR_getdomainname:
11999         goto unimplemented_nowarn;
12000 #endif
12001 
12002 #ifdef TARGET_NR_clock_settime
12003     case TARGET_NR_clock_settime:
12004     {
12005         struct timespec ts;
12006 
12007         ret = target_to_host_timespec(&ts, arg2);
12008         if (!is_error(ret)) {
12009             ret = get_errno(clock_settime(arg1, &ts));
12010         }
12011         break;
12012     }
12013 #endif
12014 #ifdef TARGET_NR_clock_gettime
12015     case TARGET_NR_clock_gettime:
12016     {
12017         struct timespec ts;
12018         ret = get_errno(clock_gettime(arg1, &ts));
12019         if (!is_error(ret)) {
12020             ret = host_to_target_timespec(arg2, &ts);
12021         }
12022         break;
12023     }
12024 #endif
12025 #ifdef TARGET_NR_clock_getres
12026     case TARGET_NR_clock_getres:
12027     {
12028         struct timespec ts;
12029         ret = get_errno(clock_getres(arg1, &ts));
12030         if (!is_error(ret)) {
12031             host_to_target_timespec(arg2, &ts);
12032         }
12033         break;
12034     }
12035 #endif
12036 #ifdef TARGET_NR_clock_nanosleep
12037     case TARGET_NR_clock_nanosleep:
12038     {
12039         struct timespec ts;
12040         target_to_host_timespec(&ts, arg3);
12041         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12042                                              &ts, arg4 ? &ts : NULL));
12043         if (arg4)
12044             host_to_target_timespec(arg4, &ts);
12045 
12046 #if defined(TARGET_PPC)
12047         /* clock_nanosleep is odd in that it returns positive errno values.
12048          * On PPC, CR0 bit 3 should be set in such a situation. */
12049         if (ret && ret != -TARGET_ERESTARTSYS) {
12050             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
12051         }
12052 #endif
12053         break;
12054     }
12055 #endif
12056 
12057 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12058     case TARGET_NR_set_tid_address:
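              /* The kernel only records this pointer and clears the child-tid
               * word at thread exit, so handing it the host view of the guest
               * address (g2h) is sufficient.
               */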
12059         ret = get_errno(set_tid_address((int *)g2h(arg1)));
12060         break;
12061 #endif
12062 
12063     case TARGET_NR_tkill:
12064         ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12065         break;
12066 
12067     case TARGET_NR_tgkill:
12068         ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
12069                         target_to_host_signal(arg3)));
12070         break;
12071 
12072 #ifdef TARGET_NR_set_robust_list
12073     case TARGET_NR_set_robust_list:
12074     case TARGET_NR_get_robust_list:
12075         /* The ABI for supporting robust futexes has userspace pass
12076          * the kernel a pointer to a linked list which is updated by
12077          * userspace after the syscall; the list is walked by the kernel
12078          * when the thread exits. Since the linked list in QEMU guest
12079          * memory isn't a valid linked list for the host and we have
12080          * no way to reliably intercept the thread-death event, we can't
12081          * support these. Silently return ENOSYS so that guest userspace
12082          * falls back to a non-robust futex implementation (which should
12083          * be OK except in the corner case of the guest crashing while
12084          * holding a mutex that is shared with another process via
12085          * shared memory).
12086          */
12087         goto unimplemented_nowarn;
12088 #endif
12089 
12090 #if defined(TARGET_NR_utimensat)
12091     case TARGET_NR_utimensat:
12092         {
12093             struct timespec *tsp, ts[2];
12094             if (!arg3) {
12095                 tsp = NULL;
12096             } else {
12097                 target_to_host_timespec(ts, arg3);
12098                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12099                 tsp = ts;
12100             }
12101             if (!arg2)
12102                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12103             else {
12104                 if (!(p = lock_user_string(arg2))) {
12105                     ret = -TARGET_EFAULT;
12106                     goto fail;
12107                 }
12108                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12109                 unlock_user(p, arg2, 0);
12110             }
12111         }
12112         break;
12113 #endif
12114     case TARGET_NR_futex:
12115         ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12116         break;
12117 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
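          /* Register an fd translator so inotify_event structs read from this
           * descriptor are byte-swapped for the target before being returned.
           */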
12118     case TARGET_NR_inotify_init:
12119         ret = get_errno(sys_inotify_init());
12120         if (ret >= 0) {
12121             fd_trans_register(ret, &target_inotify_trans);
12122         }
12123         break;
12124 #endif
12125 #ifdef CONFIG_INOTIFY1
12126 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12127     case TARGET_NR_inotify_init1:
12128         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12129                                           fcntl_flags_tbl)));
12130         if (ret >= 0) {
12131             fd_trans_register(ret, &target_inotify_trans);
12132         }
12133         break;
12134 #endif
12135 #endif
12136 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12137     case TARGET_NR_inotify_add_watch:
12138         p = lock_user_string(arg2);
12139         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12140         unlock_user(p, arg2, 0);
12141         break;
12142 #endif
12143 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12144     case TARGET_NR_inotify_rm_watch:
12145         ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12146         break;
12147 #endif
12148 
12149 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12150     case TARGET_NR_mq_open:
12151         {
12152             struct mq_attr posix_mq_attr;
12153             struct mq_attr *pposix_mq_attr;
12154             int host_flags;
12155 
12156             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12157             pposix_mq_attr = NULL;
12158             if (arg4) {
12159                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12160                     goto efault;
12161                 }
12162                 pposix_mq_attr = &posix_mq_attr;
12163             }
12164             p = lock_user_string(arg1 - 1);
12165             if (!p) {
12166                 goto efault;
12167             }
12168             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12169             unlock_user (p, arg1, 0);
12170         }
12171         break;
12172 
12173     case TARGET_NR_mq_unlink:
12174         p = lock_user_string(arg1 - 1);
12175         if (!p) {
12176             ret = -TARGET_EFAULT;
12177             break;
12178         }
12179         ret = get_errno(mq_unlink(p));
12180         unlock_user (p, arg1, 0);
12181         break;
12182 
12183     case TARGET_NR_mq_timedsend:
12184         {
12185             struct timespec ts;
12186 
12187             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12188             if (arg5 != 0) {
12189                 target_to_host_timespec(&ts, arg5);
12190                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12191                 host_to_target_timespec(arg5, &ts);
12192             } else {
12193                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12194             }
12195             unlock_user (p, arg2, arg3);
12196         }
12197         break;
12198 
12199     case TARGET_NR_mq_timedreceive:
12200         {
12201             struct timespec ts;
12202             unsigned int prio;
12203 
12204             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12205             if (arg5 != 0) {
12206                 target_to_host_timespec(&ts, arg5);
12207                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12208                                                      &prio, &ts));
12209                 host_to_target_timespec(arg5, &ts);
12210             } else {
12211                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12212                                                      &prio, NULL));
12213             }
12214             unlock_user (p, arg2, arg3);
12215             if (arg4 != 0)
12216                 put_user_u32(prio, arg4);
12217         }
12218         break;
12219 
12220     /* Not implemented for now... */
12221 /*     case TARGET_NR_mq_notify: */
12222 /*         break; */
12223 
12224     case TARGET_NR_mq_getsetattr:
12225         {
12226             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12227             ret = 0;
12228             if (arg2 != 0) {
12229                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12230                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12231                                            &posix_mq_attr_out));
12232             } else if (arg3 != 0) {
12233                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12234             }
12235             if (ret == 0 && arg3 != 0) {
12236                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12237             }
12238         }
12239         break;
12240 #endif
12241 
12242 #ifdef CONFIG_SPLICE
12243 #ifdef TARGET_NR_tee
12244     case TARGET_NR_tee:
12245         {
12246             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12247         }
12248         break;
12249 #endif
12250 #ifdef TARGET_NR_splice
12251     case TARGET_NR_splice:
12252         {
12253             loff_t loff_in, loff_out;
12254             loff_t *ploff_in = NULL, *ploff_out = NULL;
12255             if (arg2) {
12256                 if (get_user_u64(loff_in, arg2)) {
12257                     goto efault;
12258                 }
12259                 ploff_in = &loff_in;
12260             }
12261             if (arg4) {
12262                 if (get_user_u64(loff_out, arg4)) {
12263                     goto efault;
12264                 }
12265                 ploff_out = &loff_out;
12266             }
12267             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12268             if (arg2) {
12269                 if (put_user_u64(loff_in, arg2)) {
12270                     goto efault;
12271                 }
12272             }
12273             if (arg4) {
12274                 if (put_user_u64(loff_out, arg4)) {
12275                     goto efault;
12276                 }
12277             }
12278         }
12279         break;
12280 #endif
12281 #ifdef TARGET_NR_vmsplice
12282     case TARGET_NR_vmsplice:
12283         {
12284             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12285             if (vec != NULL) {
12286                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12287                 unlock_iovec(vec, arg2, arg3, 0);
12288             } else {
12289                 ret = -host_to_target_errno(errno);
12290             }
12291         }
12292         break;
12293 #endif
12294 #endif /* CONFIG_SPLICE */
12295 #ifdef CONFIG_EVENTFD
12296 #if defined(TARGET_NR_eventfd)
12297     case TARGET_NR_eventfd:
12298         ret = get_errno(eventfd(arg1, 0));
12299         if (ret >= 0) {
12300             fd_trans_register(ret, &target_eventfd_trans);
12301         }
12302         break;
12303 #endif
12304 #if defined(TARGET_NR_eventfd2)
12305     case TARGET_NR_eventfd2:
12306     {
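              /* Only the O_NONBLOCK and O_CLOEXEC bits need translating; any
               * other flag bits are passed straight through for the host
               * kernel to reject.
               */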
12307         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12308         if (arg2 & TARGET_O_NONBLOCK) {
12309             host_flags |= O_NONBLOCK;
12310         }
12311         if (arg2 & TARGET_O_CLOEXEC) {
12312             host_flags |= O_CLOEXEC;
12313         }
12314         ret = get_errno(eventfd(arg1, host_flags));
12315         if (ret >= 0) {
12316             fd_trans_register(ret, &target_eventfd_trans);
12317         }
12318         break;
12319     }
12320 #endif
12321 #endif /* CONFIG_EVENTFD  */
12322 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12323     case TARGET_NR_fallocate:
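              /* On 32-bit ABIs the 64-bit offset and length arrive split
               * across two registers each and are reassembled with
               * target_offset64(); 64-bit ABIs pass them through directly.
               */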
12324 #if TARGET_ABI_BITS == 32
12325         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12326                                   target_offset64(arg5, arg6)));
12327 #else
12328         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12329 #endif
12330         break;
12331 #endif
12332 #if defined(CONFIG_SYNC_FILE_RANGE)
12333 #if defined(TARGET_NR_sync_file_range)
12334     case TARGET_NR_sync_file_range:
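              /* As with fallocate, 32-bit ABIs split the two 64-bit arguments
               * across register pairs.  MIPS o32 aligns 64-bit arguments to
               * even register pairs, inserting a pad slot after the fd, which
               * is why the MIPS variant starts at arg3.
               */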
12335 #if TARGET_ABI_BITS == 32
12336 #if defined(TARGET_MIPS)
12337         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12338                                         target_offset64(arg5, arg6), arg7));
12339 #else
12340         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12341                                         target_offset64(arg4, arg5), arg6));
12342 #endif /* !TARGET_MIPS */
12343 #else
12344         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12345 #endif
12346         break;
12347 #endif
12348 #if defined(TARGET_NR_sync_file_range2)
12349     case TARGET_NR_sync_file_range2:
12350         /* This is like sync_file_range but the arguments are reordered */
12351 #if TARGET_ABI_BITS == 32
12352         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12353                                         target_offset64(arg5, arg6), arg2));
12354 #else
12355         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12356 #endif
12357         break;
12358 #endif
12359 #endif
12360 #if defined(TARGET_NR_signalfd4)
12361     case TARGET_NR_signalfd4:
12362         ret = do_signalfd4(arg1, arg2, arg4);
12363         break;
12364 #endif
12365 #if defined(TARGET_NR_signalfd)
12366     case TARGET_NR_signalfd:
12367         ret = do_signalfd4(arg1, arg2, 0);
12368         break;
12369 #endif
12370 #if defined(CONFIG_EPOLL)
12371 #if defined(TARGET_NR_epoll_create)
12372     case TARGET_NR_epoll_create:
12373         ret = get_errno(epoll_create(arg1));
12374         break;
12375 #endif
12376 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12377     case TARGET_NR_epoll_create1:
12378         ret = get_errno(epoll_create1(arg1));
12379         break;
12380 #endif
12381 #if defined(TARGET_NR_epoll_ctl)
12382     case TARGET_NR_epoll_ctl:
12383     {
12384         struct epoll_event ep;
12385         struct epoll_event *epp = NULL;
12386         if (arg4) {
12387             struct target_epoll_event *target_ep;
12388             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12389                 goto efault;
12390             }
12391             ep.events = tswap32(target_ep->events);
12392             /* The epoll_data_t union is just opaque data to the kernel,
12393              * so we transfer all 64 bits across and need not worry what
12394              * actual data type it is.
12395              */
12396             ep.data.u64 = tswap64(target_ep->data.u64);
12397             unlock_user_struct(target_ep, arg4, 0);
12398             epp = &ep;
12399         }
12400         ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12401         break;
12402     }
12403 #endif
12404 
12405 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12406 #if defined(TARGET_NR_epoll_wait)
12407     case TARGET_NR_epoll_wait:
12408 #endif
12409 #if defined(TARGET_NR_epoll_pwait)
12410     case TARGET_NR_epoll_pwait:
12411 #endif
12412     {
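              /* epoll_wait and epoll_pwait share this implementation: events
               * are collected into a host-side buffer and, on success, each
               * ready entry is byte-swapped back into the guest's array.
               * epoll_pwait additionally converts the guest signal mask.
               */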
12413         struct target_epoll_event *target_ep;
12414         struct epoll_event *ep;
12415         int epfd = arg1;
12416         int maxevents = arg3;
12417         int timeout = arg4;
12418 
12419         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12420             ret = -TARGET_EINVAL;
12421             break;
12422         }
12423 
12424         target_ep = lock_user(VERIFY_WRITE, arg2,
12425                               maxevents * sizeof(struct target_epoll_event), 1);
12426         if (!target_ep) {
12427             goto efault;
12428         }
12429 
12430         ep = g_try_new(struct epoll_event, maxevents);
12431         if (!ep) {
12432             unlock_user(target_ep, arg2, 0);
12433             ret = -TARGET_ENOMEM;
12434             break;
12435         }
12436 
12437         switch (num) {
12438 #if defined(TARGET_NR_epoll_pwait)
12439         case TARGET_NR_epoll_pwait:
12440         {
12441             target_sigset_t *target_set;
12442             sigset_t _set, *set = &_set;
12443 
12444             if (arg5) {
12445                 if (arg6 != sizeof(target_sigset_t)) {
12446                     ret = -TARGET_EINVAL;
12447                     break;
12448                 }
12449 
12450                 target_set = lock_user(VERIFY_READ, arg5,
12451                                        sizeof(target_sigset_t), 1);
12452                 if (!target_set) {
12453                     ret = -TARGET_EFAULT;
12454                     break;
12455                 }
12456                 target_to_host_sigset(set, target_set);
12457                 unlock_user(target_set, arg5, 0);
12458             } else {
12459                 set = NULL;
12460             }
12461 
12462             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12463                                              set, SIGSET_T_SIZE));
12464             break;
12465         }
12466 #endif
12467 #if defined(TARGET_NR_epoll_wait)
12468         case TARGET_NR_epoll_wait:
12469             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12470                                              NULL, 0));
12471             break;
12472 #endif
12473         default:
12474             ret = -TARGET_ENOSYS;
12475         }
12476         if (!is_error(ret)) {
12477             int i;
12478             for (i = 0; i < ret; i++) {
12479                 target_ep[i].events = tswap32(ep[i].events);
12480                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12481             }
12482             unlock_user(target_ep, arg2,
12483                         ret * sizeof(struct target_epoll_event));
12484         } else {
12485             unlock_user(target_ep, arg2, 0);
12486         }
12487         g_free(ep);
12488         break;
12489     }
12490 #endif
12491 #endif
12492 #ifdef TARGET_NR_prlimit64
12493     case TARGET_NR_prlimit64:
12494     {
12495         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
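              /* The rlimit values are byte-swapped between the guest layout
               * and a host_rlimit64, and the call is made through the
               * sys_prlimit64 syscall wrapper rather than a libc prlimit64().
               */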
12496         struct target_rlimit64 *target_rnew, *target_rold;
12497         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12498         int resource = target_to_host_resource(arg2);
12499         if (arg3) {
12500             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12501                 goto efault;
12502             }
12503             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12504             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12505             unlock_user_struct(target_rnew, arg3, 0);
12506             rnewp = &rnew;
12507         }
12508 
12509         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : NULL));
12510         if (!is_error(ret) && arg4) {
12511             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12512                 goto efault;
12513             }
12514             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12515             target_rold->rlim_max = tswap64(rold.rlim_max);
12516             unlock_user_struct(target_rold, arg4, 1);
12517         }
12518         break;
12519     }
12520 #endif
12521 #ifdef TARGET_NR_gethostname
12522     case TARGET_NR_gethostname:
12523     {
12524         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12525         if (name) {
12526             ret = get_errno(gethostname(name, arg2));
12527             unlock_user(name, arg1, arg2);
12528         } else {
12529             ret = -TARGET_EFAULT;
12530         }
12531         break;
12532     }
12533 #endif
12534 #ifdef TARGET_NR_atomic_cmpxchg_32
12535     case TARGET_NR_atomic_cmpxchg_32:
12536     {
12537         /* should use start_exclusive from main.c */
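              /* Compare the guest word at arg6 with arg2 and, if they match,
               * store arg1 there; the old value is returned either way.  As
               * noted above, this is not atomic with respect to other guest
               * threads.
               */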
12538         abi_ulong mem_value;
12539         if (get_user_u32(mem_value, arg6)) {
12540             target_siginfo_t info;
12541             info.si_signo = SIGSEGV;
12542             info.si_errno = 0;
12543             info.si_code = TARGET_SEGV_MAPERR;
12544             info._sifields._sigfault._addr = arg6;
12545             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12546                          QEMU_SI_FAULT, &info);
12547             ret = 0xdeadbeef;
12548 
12549         }
12550         if (mem_value == arg2) {
12551             put_user_u32(arg1, arg6);
              }
12552         ret = mem_value;
12553         break;
12554     }
12555 #endif
12556 #ifdef TARGET_NR_atomic_barrier
12557     case TARGET_NR_atomic_barrier:
12558     {
12559         /* Like the kernel implementation and the qemu arm barrier, this is a no-op. */
12560         ret = 0;
12561         break;
12562     }
12563 #endif
12564 
12565 #ifdef TARGET_NR_timer_create
12566     case TARGET_NR_timer_create:
12567     {
12568         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
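              /* Host timers live in the g_posix_timers table.  The id handed
               * back to the guest is TIMER_MAGIC OR'd with the table index;
               * get_timer_id() validates and strips that encoding again in the
               * timer_* syscalls below.
               */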
12569 
12570         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12571 
12572         int clkid = arg1;
12573         int timer_index = next_free_host_timer();
12574 
12575         if (timer_index < 0) {
12576             ret = -TARGET_EAGAIN;
12577         } else {
12578             timer_t *phtimer = g_posix_timers + timer_index;
12579 
12580             if (arg2) {
12581                 phost_sevp = &host_sevp;
12582                 ret = target_to_host_sigevent(phost_sevp, arg2);
12583                 if (ret != 0) {
12584                     break;
12585                 }
12586             }
12587 
12588             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12589             if (ret) {
12590                 phtimer = NULL;
12591             } else {
12592                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12593                     goto efault;
12594                 }
12595             }
12596         }
12597         break;
12598     }
12599 #endif
12600 
12601 #ifdef TARGET_NR_timer_settime
12602     case TARGET_NR_timer_settime:
12603     {
12604         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12605          * struct itimerspec * old_value */
12606         target_timer_t timerid = get_timer_id(arg1);
12607 
12608         if (timerid < 0) {
12609             ret = timerid;
12610         } else if (arg3 == 0) {
12611             ret = -TARGET_EINVAL;
12612         } else {
12613             timer_t htimer = g_posix_timers[timerid];
12614             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12615 
12616             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12617                 goto efault;
12618             }
12619             ret = get_errno(
12620                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12621             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12622                 goto efault;
12623             }
12624         }
12625         break;
12626     }
12627 #endif
12628 
12629 #ifdef TARGET_NR_timer_gettime
12630     case TARGET_NR_timer_gettime:
12631     {
12632         /* args: timer_t timerid, struct itimerspec *curr_value */
12633         target_timer_t timerid = get_timer_id(arg1);
12634 
12635         if (timerid < 0) {
12636             ret = timerid;
12637         } else if (!arg2) {
12638             ret = -TARGET_EFAULT;
12639         } else {
12640             timer_t htimer = g_posix_timers[timerid];
12641             struct itimerspec hspec;
12642             ret = get_errno(timer_gettime(htimer, &hspec));
12643 
12644             if (host_to_target_itimerspec(arg2, &hspec)) {
12645                 ret = -TARGET_EFAULT;
12646             }
12647         }
12648         break;
12649     }
12650 #endif
12651 
12652 #ifdef TARGET_NR_timer_getoverrun
12653     case TARGET_NR_timer_getoverrun:
12654     {
12655         /* args: timer_t timerid */
12656         target_timer_t timerid = get_timer_id(arg1);
12657 
12658         if (timerid < 0) {
12659             ret = timerid;
12660         } else {
12661             timer_t htimer = g_posix_timers[timerid];
12662             ret = get_errno(timer_getoverrun(htimer));
12663         }
12664         fd_trans_unregister(ret);
12665         break;
12666     }
12667 #endif
12668 
12669 #ifdef TARGET_NR_timer_delete
12670     case TARGET_NR_timer_delete:
12671     {
12672         /* args: timer_t timerid */
12673         target_timer_t timerid = get_timer_id(arg1);
12674 
12675         if (timerid < 0) {
12676             ret = timerid;
12677         } else {
12678             timer_t htimer = g_posix_timers[timerid];
12679             ret = get_errno(timer_delete(htimer));
12680             g_posix_timers[timerid] = 0;
12681         }
12682         break;
12683     }
12684 #endif
12685 
12686 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12687     case TARGET_NR_timerfd_create:
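              /* TFD_CLOEXEC and TFD_NONBLOCK share their values with the
               * corresponding O_* flags, so the generic fcntl_flags_tbl can
               * convert the guest flag bits to host ones.
               */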
12688         ret = get_errno(timerfd_create(arg1,
12689                 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12690         break;
12691 #endif
12692 
12693 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12694     case TARGET_NR_timerfd_gettime:
12695         {
12696             struct itimerspec its_curr;
12697 
12698             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12699 
12700             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12701                 goto efault;
12702             }
12703         }
12704         break;
12705 #endif
12706 
12707 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12708     case TARGET_NR_timerfd_settime:
12709         {
12710             struct itimerspec its_new, its_old, *p_new;
12711 
12712             if (arg3) {
12713                 if (target_to_host_itimerspec(&its_new, arg3)) {
12714                     goto efault;
12715                 }
12716                 p_new = &its_new;
12717             } else {
12718                 p_new = NULL;
12719             }
12720 
12721             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12722 
12723             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12724                 goto efault;
12725             }
12726         }
12727         break;
12728 #endif
12729 
12730 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12731     case TARGET_NR_ioprio_get:
12732         ret = get_errno(ioprio_get(arg1, arg2));
12733         break;
12734 #endif
12735 
12736 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12737     case TARGET_NR_ioprio_set:
12738         ret = get_errno(ioprio_set(arg1, arg2, arg3));
12739         break;
12740 #endif
12741 
12742 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12743     case TARGET_NR_setns:
12744         ret = get_errno(setns(arg1, arg2));
12745         break;
12746 #endif
12747 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12748     case TARGET_NR_unshare:
12749         ret = get_errno(unshare(arg1));
12750         break;
12751 #endif
12752 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12753     case TARGET_NR_kcmp:
12754         ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12755         break;
12756 #endif
12757 
12758     default:
12759     unimplemented:
12760         gemu_log("qemu: Unsupported syscall: %d\n", num);
12761 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12762     unimplemented_nowarn:
12763 #endif
12764         ret = -TARGET_ENOSYS;
12765         break;
12766     }
12767 fail:
12768 #ifdef DEBUG
12769     gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12770 #endif
12771     if (do_strace) {
12772         print_syscall_ret(num, ret);
          }
12773     trace_guest_user_syscall_ret(cpu, num, ret);
12774     return ret;
12775 efault:
12776     ret = -TARGET_EFAULT;
12777     goto fail;
12778 }
12779