xref: /openbmc/qemu/linux-user/syscall.c (revision 5dafaf4f)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
165 
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168  * once. This exercises the codepaths for restart.
169  */
170 //#define DEBUG_ERESTARTSYS
171 
172 //#include <linux/msdos_fs.h>
173 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
174 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
175 
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
183 
184 #define _syscall0(type,name)		\
185 static type name (void)			\
186 {					\
187 	return syscall(__NR_##name);	\
188 }
189 
190 #define _syscall1(type,name,type1,arg1)		\
191 static type name (type1 arg1)			\
192 {						\
193 	return syscall(__NR_##name, arg1);	\
194 }
195 
196 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
197 static type name (type1 arg1,type2 arg2)		\
198 {							\
199 	return syscall(__NR_##name, arg1, arg2);	\
200 }
201 
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
203 static type name (type1 arg1,type2 arg2,type3 arg3)		\
204 {								\
205 	return syscall(__NR_##name, arg1, arg2, arg3);		\
206 }
207 
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
210 {										\
211 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
212 }
213 
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
215 		  type5,arg5)							\
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
217 {										\
218 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
219 }
220 
221 
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
223 		  type5,arg5,type6,arg6)					\
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
225                   type6 arg6)							\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
228 }
229 
230 
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
247 
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
252 
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257    errno. */
258 static int gettid(void) {
259     return -ENOSYS;
260 }
261 #endif
262 
263 /* For the 64-bit guest on 32-bit host case we must emulate
264  * getdents using getdents64, because otherwise the host
265  * might hand us back more dirent records than we can fit
266  * into the guest buffer after structure format conversion.
267  * Otherwise we emulate getdents with getdents if the host has it.
268  */
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
271 #endif
272 
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
275 #endif
276 #if (defined(TARGET_NR_getdents) && \
277       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
280 #endif
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
283           loff_t *, res, uint, wh);
284 #endif
285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
287           siginfo_t *, uinfo)
288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group,int,error_code)
291 #endif
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address,int *,tidptr)
294 #endif
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
297           const struct timespec *,timeout,int *,uaddr2,int,val3)
298 #endif
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
301           unsigned long *, user_mask_ptr);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
304           unsigned long *, user_mask_ptr);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
308           void *, arg);
309 _syscall2(int, capget, struct __user_cap_header_struct *, header,
310           struct __user_cap_data_struct *, data);
311 _syscall2(int, capset, struct __user_cap_header_struct *, header,
312           struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get, int, which, int, who)
315 #endif
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
318 #endif
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
321 #endif
322 
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
325           unsigned long, idx1, unsigned long, idx2)
326 #endif
327 
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
330   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
331   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
332   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
333   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
334   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
335   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
336   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
337   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
338   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
339   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
340   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
342 #if defined(O_DIRECT)
343   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
344 #endif
345 #if defined(O_NOATIME)
346   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
347 #endif
348 #if defined(O_CLOEXEC)
349   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
350 #endif
351 #if defined(O_PATH)
352   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
353 #endif
354 #if defined(O_TMPFILE)
355   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
356 #endif
357   /* Don't terminate the list prematurely on 64-bit host+guest.  */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361   { 0, 0, 0, 0 }
362 };
363 
364 enum {
365     QEMU_IFLA_BR_UNSPEC,
366     QEMU_IFLA_BR_FORWARD_DELAY,
367     QEMU_IFLA_BR_HELLO_TIME,
368     QEMU_IFLA_BR_MAX_AGE,
369     QEMU_IFLA_BR_AGEING_TIME,
370     QEMU_IFLA_BR_STP_STATE,
371     QEMU_IFLA_BR_PRIORITY,
372     QEMU_IFLA_BR_VLAN_FILTERING,
373     QEMU_IFLA_BR_VLAN_PROTOCOL,
374     QEMU_IFLA_BR_GROUP_FWD_MASK,
375     QEMU_IFLA_BR_ROOT_ID,
376     QEMU_IFLA_BR_BRIDGE_ID,
377     QEMU_IFLA_BR_ROOT_PORT,
378     QEMU_IFLA_BR_ROOT_PATH_COST,
379     QEMU_IFLA_BR_TOPOLOGY_CHANGE,
380     QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
381     QEMU_IFLA_BR_HELLO_TIMER,
382     QEMU_IFLA_BR_TCN_TIMER,
383     QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
384     QEMU_IFLA_BR_GC_TIMER,
385     QEMU_IFLA_BR_GROUP_ADDR,
386     QEMU_IFLA_BR_FDB_FLUSH,
387     QEMU_IFLA_BR_MCAST_ROUTER,
388     QEMU_IFLA_BR_MCAST_SNOOPING,
389     QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
390     QEMU_IFLA_BR_MCAST_QUERIER,
391     QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
392     QEMU_IFLA_BR_MCAST_HASH_MAX,
393     QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
394     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
395     QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
396     QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
397     QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
398     QEMU_IFLA_BR_MCAST_QUERY_INTVL,
399     QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
400     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
401     QEMU_IFLA_BR_NF_CALL_IPTABLES,
402     QEMU_IFLA_BR_NF_CALL_IP6TABLES,
403     QEMU_IFLA_BR_NF_CALL_ARPTABLES,
404     QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
405     QEMU_IFLA_BR_PAD,
406     QEMU_IFLA_BR_VLAN_STATS_ENABLED,
407     QEMU_IFLA_BR_MCAST_STATS_ENABLED,
408     QEMU_IFLA_BR_MCAST_IGMP_VERSION,
409     QEMU_IFLA_BR_MCAST_MLD_VERSION,
410     QEMU___IFLA_BR_MAX,
411 };
412 
413 enum {
414     QEMU_IFLA_UNSPEC,
415     QEMU_IFLA_ADDRESS,
416     QEMU_IFLA_BROADCAST,
417     QEMU_IFLA_IFNAME,
418     QEMU_IFLA_MTU,
419     QEMU_IFLA_LINK,
420     QEMU_IFLA_QDISC,
421     QEMU_IFLA_STATS,
422     QEMU_IFLA_COST,
423     QEMU_IFLA_PRIORITY,
424     QEMU_IFLA_MASTER,
425     QEMU_IFLA_WIRELESS,
426     QEMU_IFLA_PROTINFO,
427     QEMU_IFLA_TXQLEN,
428     QEMU_IFLA_MAP,
429     QEMU_IFLA_WEIGHT,
430     QEMU_IFLA_OPERSTATE,
431     QEMU_IFLA_LINKMODE,
432     QEMU_IFLA_LINKINFO,
433     QEMU_IFLA_NET_NS_PID,
434     QEMU_IFLA_IFALIAS,
435     QEMU_IFLA_NUM_VF,
436     QEMU_IFLA_VFINFO_LIST,
437     QEMU_IFLA_STATS64,
438     QEMU_IFLA_VF_PORTS,
439     QEMU_IFLA_PORT_SELF,
440     QEMU_IFLA_AF_SPEC,
441     QEMU_IFLA_GROUP,
442     QEMU_IFLA_NET_NS_FD,
443     QEMU_IFLA_EXT_MASK,
444     QEMU_IFLA_PROMISCUITY,
445     QEMU_IFLA_NUM_TX_QUEUES,
446     QEMU_IFLA_NUM_RX_QUEUES,
447     QEMU_IFLA_CARRIER,
448     QEMU_IFLA_PHYS_PORT_ID,
449     QEMU_IFLA_CARRIER_CHANGES,
450     QEMU_IFLA_PHYS_SWITCH_ID,
451     QEMU_IFLA_LINK_NETNSID,
452     QEMU_IFLA_PHYS_PORT_NAME,
453     QEMU_IFLA_PROTO_DOWN,
454     QEMU_IFLA_GSO_MAX_SEGS,
455     QEMU_IFLA_GSO_MAX_SIZE,
456     QEMU_IFLA_PAD,
457     QEMU_IFLA_XDP,
458     QEMU_IFLA_EVENT,
459     QEMU_IFLA_NEW_NETNSID,
460     QEMU_IFLA_IF_NETNSID,
461     QEMU_IFLA_CARRIER_UP_COUNT,
462     QEMU_IFLA_CARRIER_DOWN_COUNT,
463     QEMU_IFLA_NEW_IFINDEX,
464     QEMU___IFLA_MAX
465 };
466 
467 enum {
468     QEMU_IFLA_BRPORT_UNSPEC,
469     QEMU_IFLA_BRPORT_STATE,
470     QEMU_IFLA_BRPORT_PRIORITY,
471     QEMU_IFLA_BRPORT_COST,
472     QEMU_IFLA_BRPORT_MODE,
473     QEMU_IFLA_BRPORT_GUARD,
474     QEMU_IFLA_BRPORT_PROTECT,
475     QEMU_IFLA_BRPORT_FAST_LEAVE,
476     QEMU_IFLA_BRPORT_LEARNING,
477     QEMU_IFLA_BRPORT_UNICAST_FLOOD,
478     QEMU_IFLA_BRPORT_PROXYARP,
479     QEMU_IFLA_BRPORT_LEARNING_SYNC,
480     QEMU_IFLA_BRPORT_PROXYARP_WIFI,
481     QEMU_IFLA_BRPORT_ROOT_ID,
482     QEMU_IFLA_BRPORT_BRIDGE_ID,
483     QEMU_IFLA_BRPORT_DESIGNATED_PORT,
484     QEMU_IFLA_BRPORT_DESIGNATED_COST,
485     QEMU_IFLA_BRPORT_ID,
486     QEMU_IFLA_BRPORT_NO,
487     QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
488     QEMU_IFLA_BRPORT_CONFIG_PENDING,
489     QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
490     QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
491     QEMU_IFLA_BRPORT_HOLD_TIMER,
492     QEMU_IFLA_BRPORT_FLUSH,
493     QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
494     QEMU_IFLA_BRPORT_PAD,
495     QEMU_IFLA_BRPORT_MCAST_FLOOD,
496     QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
497     QEMU_IFLA_BRPORT_VLAN_TUNNEL,
498     QEMU_IFLA_BRPORT_BCAST_FLOOD,
499     QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
500     QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
501     QEMU___IFLA_BRPORT_MAX
502 };
503 
504 enum {
505     QEMU_IFLA_INFO_UNSPEC,
506     QEMU_IFLA_INFO_KIND,
507     QEMU_IFLA_INFO_DATA,
508     QEMU_IFLA_INFO_XSTATS,
509     QEMU_IFLA_INFO_SLAVE_KIND,
510     QEMU_IFLA_INFO_SLAVE_DATA,
511     QEMU___IFLA_INFO_MAX,
512 };
513 
514 enum {
515     QEMU_IFLA_INET_UNSPEC,
516     QEMU_IFLA_INET_CONF,
517     QEMU___IFLA_INET_MAX,
518 };
519 
520 enum {
521     QEMU_IFLA_INET6_UNSPEC,
522     QEMU_IFLA_INET6_FLAGS,
523     QEMU_IFLA_INET6_CONF,
524     QEMU_IFLA_INET6_STATS,
525     QEMU_IFLA_INET6_MCAST,
526     QEMU_IFLA_INET6_CACHEINFO,
527     QEMU_IFLA_INET6_ICMP6STATS,
528     QEMU_IFLA_INET6_TOKEN,
529     QEMU_IFLA_INET6_ADDR_GEN_MODE,
530     QEMU___IFLA_INET6_MAX
531 };
532 
533 enum {
534     QEMU_IFLA_XDP_UNSPEC,
535     QEMU_IFLA_XDP_FD,
536     QEMU_IFLA_XDP_ATTACHED,
537     QEMU_IFLA_XDP_FLAGS,
538     QEMU_IFLA_XDP_PROG_ID,
539     QEMU___IFLA_XDP_MAX,
540 };
541 
542 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
543 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
544 typedef struct TargetFdTrans {
545     TargetFdDataFunc host_to_target_data;
546     TargetFdDataFunc target_to_host_data;
547     TargetFdAddrFunc target_to_host_addr;
548 } TargetFdTrans;
549 
550 static TargetFdTrans **target_fd_trans;
551 
552 static unsigned int target_fd_max;
553 
554 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
555 {
556     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
557         return target_fd_trans[fd]->target_to_host_data;
558     }
559     return NULL;
560 }
561 
562 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
563 {
564     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
565         return target_fd_trans[fd]->host_to_target_data;
566     }
567     return NULL;
568 }
569 
570 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
571 {
572     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
573         return target_fd_trans[fd]->target_to_host_addr;
574     }
575     return NULL;
576 }
577 
578 static void fd_trans_register(int fd, TargetFdTrans *trans)
579 {
580     unsigned int oldmax;
581 
582     if (fd >= target_fd_max) {
583         oldmax = target_fd_max;
584         target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
585         target_fd_trans = g_renew(TargetFdTrans *,
586                                   target_fd_trans, target_fd_max);
587         memset((void *)(target_fd_trans + oldmax), 0,
588                (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
589     }
590     target_fd_trans[fd] = trans;
591 }
592 
593 static void fd_trans_unregister(int fd)
594 {
595     if (fd >= 0 && fd < target_fd_max) {
596         target_fd_trans[fd] = NULL;
597     }
598 }
599 
600 static void fd_trans_dup(int oldfd, int newfd)
601 {
602     fd_trans_unregister(newfd);
603     if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
604         fd_trans_register(newfd, target_fd_trans[oldfd]);
605     }
606 }
607 
608 static int sys_getcwd1(char *buf, size_t size)
609 {
610   if (getcwd(buf, size) == NULL) {
611       /* getcwd() sets errno */
612       return (-1);
613   }
614   return strlen(buf)+1;
615 }
616 
617 #ifdef TARGET_NR_utimensat
618 #if defined(__NR_utimensat)
619 #define __NR_sys_utimensat __NR_utimensat
620 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
621           const struct timespec *,tsp,int,flags)
622 #else
623 static int sys_utimensat(int dirfd, const char *pathname,
624                          const struct timespec times[2], int flags)
625 {
626     errno = ENOSYS;
627     return -1;
628 }
629 #endif
630 #endif /* TARGET_NR_utimensat */
631 
632 #ifdef TARGET_NR_renameat2
633 #if defined(__NR_renameat2)
634 #define __NR_sys_renameat2 __NR_renameat2
635 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
636           const char *, new, unsigned int, flags)
637 #else
638 static int sys_renameat2(int oldfd, const char *old,
639                          int newfd, const char *new, int flags)
640 {
641     if (flags == 0) {
642         return renameat(oldfd, old, newfd, new);
643     }
644     errno = ENOSYS;
645     return -1;
646 }
647 #endif
648 #endif /* TARGET_NR_renameat2 */
649 
650 #ifdef CONFIG_INOTIFY
651 #include <sys/inotify.h>
652 
653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
654 static int sys_inotify_init(void)
655 {
656   return (inotify_init());
657 }
658 #endif
659 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
660 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
661 {
662   return (inotify_add_watch(fd, pathname, mask));
663 }
664 #endif
665 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
666 static int sys_inotify_rm_watch(int fd, int32_t wd)
667 {
668   return (inotify_rm_watch(fd, wd));
669 }
670 #endif
671 #ifdef CONFIG_INOTIFY1
672 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
673 static int sys_inotify_init1(int flags)
674 {
675   return (inotify_init1(flags));
676 }
677 #endif
678 #endif
679 #else
680 /* Userspace can usually survive runtime without inotify */
681 #undef TARGET_NR_inotify_init
682 #undef TARGET_NR_inotify_init1
683 #undef TARGET_NR_inotify_add_watch
684 #undef TARGET_NR_inotify_rm_watch
685 #endif /* CONFIG_INOTIFY  */
686 
687 #if defined(TARGET_NR_prlimit64)
688 #ifndef __NR_prlimit64
689 # define __NR_prlimit64 -1
690 #endif
691 #define __NR_sys_prlimit64 __NR_prlimit64
692 /* The glibc rlimit structure may not be that used by the underlying syscall */
693 struct host_rlimit64 {
694     uint64_t rlim_cur;
695     uint64_t rlim_max;
696 };
697 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
698           const struct host_rlimit64 *, new_limit,
699           struct host_rlimit64 *, old_limit)
700 #endif
701 
702 
703 #if defined(TARGET_NR_timer_create)
704 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
705 static timer_t g_posix_timers[32] = { 0, } ;
706 
707 static inline int next_free_host_timer(void)
708 {
709     int k ;
710     /* FIXME: Does finding the next free slot require a lock? */
711     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
712         if (g_posix_timers[k] == 0) {
713             g_posix_timers[k] = (timer_t) 1;
714             return k;
715         }
716     }
717     return -1;
718 }
719 #endif
720 
721 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
722 #ifdef TARGET_ARM
723 static inline int regpairs_aligned(void *cpu_env, int num)
724 {
725     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
726 }
727 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
728 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
729 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
730 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
731  * of registers which translates to the same as ARM/MIPS, because we start with
732  * r3 as arg1 */
733 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
734 #elif defined(TARGET_SH4)
735 /* SH4 doesn't align register pairs, except for p{read,write}64 */
736 static inline int regpairs_aligned(void *cpu_env, int num)
737 {
738     switch (num) {
739     case TARGET_NR_pread64:
740     case TARGET_NR_pwrite64:
741         return 1;
742 
743     default:
744         return 0;
745     }
746 }
747 #elif defined(TARGET_XTENSA)
748 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
749 #else
750 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
751 #endif
752 
753 #define ERRNO_TABLE_SIZE 1200
754 
755 /* target_to_host_errno_table[] is initialized from
756  * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
758 };
759 
760 /*
761  * This list is the union of errno values overridden in asm-<arch>/errno.h
762  * minus the errnos that are not actually generic to all archs.
763  */
764 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
765     [EAGAIN]		= TARGET_EAGAIN,
766     [EIDRM]		= TARGET_EIDRM,
767     [ECHRNG]		= TARGET_ECHRNG,
768     [EL2NSYNC]		= TARGET_EL2NSYNC,
769     [EL3HLT]		= TARGET_EL3HLT,
770     [EL3RST]		= TARGET_EL3RST,
771     [ELNRNG]		= TARGET_ELNRNG,
772     [EUNATCH]		= TARGET_EUNATCH,
773     [ENOCSI]		= TARGET_ENOCSI,
774     [EL2HLT]		= TARGET_EL2HLT,
775     [EDEADLK]		= TARGET_EDEADLK,
776     [ENOLCK]		= TARGET_ENOLCK,
777     [EBADE]		= TARGET_EBADE,
778     [EBADR]		= TARGET_EBADR,
779     [EXFULL]		= TARGET_EXFULL,
780     [ENOANO]		= TARGET_ENOANO,
781     [EBADRQC]		= TARGET_EBADRQC,
782     [EBADSLT]		= TARGET_EBADSLT,
783     [EBFONT]		= TARGET_EBFONT,
784     [ENOSTR]		= TARGET_ENOSTR,
785     [ENODATA]		= TARGET_ENODATA,
786     [ETIME]		= TARGET_ETIME,
787     [ENOSR]		= TARGET_ENOSR,
788     [ENONET]		= TARGET_ENONET,
789     [ENOPKG]		= TARGET_ENOPKG,
790     [EREMOTE]		= TARGET_EREMOTE,
791     [ENOLINK]		= TARGET_ENOLINK,
792     [EADV]		= TARGET_EADV,
793     [ESRMNT]		= TARGET_ESRMNT,
794     [ECOMM]		= TARGET_ECOMM,
795     [EPROTO]		= TARGET_EPROTO,
796     [EDOTDOT]		= TARGET_EDOTDOT,
797     [EMULTIHOP]		= TARGET_EMULTIHOP,
798     [EBADMSG]		= TARGET_EBADMSG,
799     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
800     [EOVERFLOW]		= TARGET_EOVERFLOW,
801     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
802     [EBADFD]		= TARGET_EBADFD,
803     [EREMCHG]		= TARGET_EREMCHG,
804     [ELIBACC]		= TARGET_ELIBACC,
805     [ELIBBAD]		= TARGET_ELIBBAD,
806     [ELIBSCN]		= TARGET_ELIBSCN,
807     [ELIBMAX]		= TARGET_ELIBMAX,
808     [ELIBEXEC]		= TARGET_ELIBEXEC,
809     [EILSEQ]		= TARGET_EILSEQ,
810     [ENOSYS]		= TARGET_ENOSYS,
811     [ELOOP]		= TARGET_ELOOP,
812     [ERESTART]		= TARGET_ERESTART,
813     [ESTRPIPE]		= TARGET_ESTRPIPE,
814     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
815     [EUSERS]		= TARGET_EUSERS,
816     [ENOTSOCK]		= TARGET_ENOTSOCK,
817     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
818     [EMSGSIZE]		= TARGET_EMSGSIZE,
819     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
820     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
821     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
822     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
823     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
824     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
825     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
826     [EADDRINUSE]	= TARGET_EADDRINUSE,
827     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
828     [ENETDOWN]		= TARGET_ENETDOWN,
829     [ENETUNREACH]	= TARGET_ENETUNREACH,
830     [ENETRESET]		= TARGET_ENETRESET,
831     [ECONNABORTED]	= TARGET_ECONNABORTED,
832     [ECONNRESET]	= TARGET_ECONNRESET,
833     [ENOBUFS]		= TARGET_ENOBUFS,
834     [EISCONN]		= TARGET_EISCONN,
835     [ENOTCONN]		= TARGET_ENOTCONN,
836     [EUCLEAN]		= TARGET_EUCLEAN,
837     [ENOTNAM]		= TARGET_ENOTNAM,
838     [ENAVAIL]		= TARGET_ENAVAIL,
839     [EISNAM]		= TARGET_EISNAM,
840     [EREMOTEIO]		= TARGET_EREMOTEIO,
841     [EDQUOT]            = TARGET_EDQUOT,
842     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
843     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
844     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
845     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
846     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
847     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
848     [EALREADY]		= TARGET_EALREADY,
849     [EINPROGRESS]	= TARGET_EINPROGRESS,
850     [ESTALE]		= TARGET_ESTALE,
851     [ECANCELED]		= TARGET_ECANCELED,
852     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
853     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
854 #ifdef ENOKEY
855     [ENOKEY]		= TARGET_ENOKEY,
856 #endif
857 #ifdef EKEYEXPIRED
858     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
859 #endif
860 #ifdef EKEYREVOKED
861     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
862 #endif
863 #ifdef EKEYREJECTED
864     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
865 #endif
866 #ifdef EOWNERDEAD
867     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
868 #endif
869 #ifdef ENOTRECOVERABLE
870     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
871 #endif
872 #ifdef ENOMSG
873     [ENOMSG]            = TARGET_ENOMSG,
874 #endif
875 #ifdef ERKFILL
876     [ERFKILL]           = TARGET_ERFKILL,
877 #endif
878 #ifdef EHWPOISON
879     [EHWPOISON]         = TARGET_EHWPOISON,
880 #endif
881 };
882 
883 static inline int host_to_target_errno(int err)
884 {
885     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
886         host_to_target_errno_table[err]) {
887         return host_to_target_errno_table[err];
888     }
889     return err;
890 }
891 
892 static inline int target_to_host_errno(int err)
893 {
894     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
895         target_to_host_errno_table[err]) {
896         return target_to_host_errno_table[err];
897     }
898     return err;
899 }
900 
901 static inline abi_long get_errno(abi_long ret)
902 {
903     if (ret == -1)
904         return -host_to_target_errno(errno);
905     else
906         return ret;
907 }
908 
909 const char *target_strerror(int err)
910 {
911     if (err == TARGET_ERESTARTSYS) {
912         return "To be restarted";
913     }
914     if (err == TARGET_QEMU_ESIGRETURN) {
915         return "Successful exit from sigreturn";
916     }
917 
918     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
919         return NULL;
920     }
921     return strerror(target_to_host_errno(err));
922 }
923 
924 #define safe_syscall0(type, name) \
925 static type safe_##name(void) \
926 { \
927     return safe_syscall(__NR_##name); \
928 }
929 
930 #define safe_syscall1(type, name, type1, arg1) \
931 static type safe_##name(type1 arg1) \
932 { \
933     return safe_syscall(__NR_##name, arg1); \
934 }
935 
936 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
937 static type safe_##name(type1 arg1, type2 arg2) \
938 { \
939     return safe_syscall(__NR_##name, arg1, arg2); \
940 }
941 
942 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
943 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
944 { \
945     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
946 }
947 
948 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
949     type4, arg4) \
950 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
951 { \
952     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
953 }
954 
955 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
956     type4, arg4, type5, arg5) \
957 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
958     type5 arg5) \
959 { \
960     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
961 }
962 
963 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
964     type4, arg4, type5, arg5, type6, arg6) \
965 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
966     type5 arg5, type6 arg6) \
967 { \
968     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
969 }
970 
971 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
972 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
973 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
974               int, flags, mode_t, mode)
975 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
976               struct rusage *, rusage)
977 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
978               int, options, struct rusage *, rusage)
979 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
980 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
981               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
982 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
983               struct timespec *, tsp, const sigset_t *, sigmask,
984               size_t, sigsetsize)
985 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
986               int, maxevents, int, timeout, const sigset_t *, sigmask,
987               size_t, sigsetsize)
988 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
989               const struct timespec *,timeout,int *,uaddr2,int,val3)
990 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
991 safe_syscall2(int, kill, pid_t, pid, int, sig)
992 safe_syscall2(int, tkill, int, tid, int, sig)
993 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
994 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
995 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
996 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
997               unsigned long, pos_l, unsigned long, pos_h)
998 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
999               unsigned long, pos_l, unsigned long, pos_h)
1000 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
1001               socklen_t, addrlen)
1002 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
1003               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
1004 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
1005               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
1006 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
1007 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
1008 safe_syscall2(int, flock, int, fd, int, operation)
1009 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
1010               const struct timespec *, uts, size_t, sigsetsize)
1011 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
1012               int, flags)
1013 safe_syscall2(int, nanosleep, const struct timespec *, req,
1014               struct timespec *, rem)
1015 #ifdef TARGET_NR_clock_nanosleep
1016 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
1017               const struct timespec *, req, struct timespec *, rem)
1018 #endif
1019 #ifdef __NR_msgsnd
1020 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
1021               int, flags)
1022 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
1023               long, msgtype, int, flags)
1024 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
1025               unsigned, nsops, const struct timespec *, timeout)
1026 #else
1027 /* This host kernel architecture uses a single ipc syscall; fake up
1028  * wrappers for the sub-operations to hide this implementation detail.
1029  * Annoyingly we can't include linux/ipc.h to get the constant definitions
1030  * for the call parameter because some structs in there conflict with the
1031  * sys/ipc.h ones. So we just define them here, and rely on them being
1032  * the same for all host architectures.
1033  */
1034 #define Q_SEMTIMEDOP 4
1035 #define Q_MSGSND 11
1036 #define Q_MSGRCV 12
1037 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1038 
1039 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1040               void *, ptr, long, fifth)
1041 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1042 {
1043     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1044 }
1045 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1046 {
1047     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1048 }
1049 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1050                            const struct timespec *timeout)
1051 {
1052     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1053                     (long)timeout);
1054 }
1055 #endif
1056 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1057 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1058               size_t, len, unsigned, prio, const struct timespec *, timeout)
1059 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1060               size_t, len, unsigned *, prio, const struct timespec *, timeout)
1061 #endif
1062 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1063  * "third argument might be integer or pointer or not present" behaviour of
1064  * the libc function.
1065  */
1066 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1067 /* Similarly for fcntl. Note that callers must always:
1068  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1069  *  use the flock64 struct rather than unsuffixed flock
1070  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1071  */
1072 #ifdef __NR_fcntl64
1073 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1074 #else
1075 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1076 #endif
1077 
1078 static inline int host_to_target_sock_type(int host_type)
1079 {
1080     int target_type;
1081 
1082     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1083     case SOCK_DGRAM:
1084         target_type = TARGET_SOCK_DGRAM;
1085         break;
1086     case SOCK_STREAM:
1087         target_type = TARGET_SOCK_STREAM;
1088         break;
1089     default:
1090         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1091         break;
1092     }
1093 
1094 #if defined(SOCK_CLOEXEC)
1095     if (host_type & SOCK_CLOEXEC) {
1096         target_type |= TARGET_SOCK_CLOEXEC;
1097     }
1098 #endif
1099 
1100 #if defined(SOCK_NONBLOCK)
1101     if (host_type & SOCK_NONBLOCK) {
1102         target_type |= TARGET_SOCK_NONBLOCK;
1103     }
1104 #endif
1105 
1106     return target_type;
1107 }
1108 
1109 static abi_ulong target_brk;
1110 static abi_ulong target_original_brk;
1111 static abi_ulong brk_page;
1112 
1113 void target_set_brk(abi_ulong new_brk)
1114 {
1115     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1116     brk_page = HOST_PAGE_ALIGN(target_brk);
1117 }
1118 
1119 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1120 #define DEBUGF_BRK(message, args...)
1121 
1122 /* do_brk() must return target values and target errnos. */
1123 abi_long do_brk(abi_ulong new_brk)
1124 {
1125     abi_long mapped_addr;
1126     abi_ulong new_alloc_size;
1127 
1128     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1129 
1130     if (!new_brk) {
1131         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1132         return target_brk;
1133     }
1134     if (new_brk < target_original_brk) {
1135         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1136                    target_brk);
1137         return target_brk;
1138     }
1139 
1140     /* If the new brk is less than the highest page reserved to the
1141      * target heap allocation, set it and we're almost done...  */
1142     if (new_brk <= brk_page) {
1143         /* Heap contents are initialized to zero, as for anonymous
1144          * mapped pages.  */
1145         if (new_brk > target_brk) {
1146             memset(g2h(target_brk), 0, new_brk - target_brk);
1147         }
1148 	target_brk = new_brk;
1149         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1150     	return target_brk;
1151     }
1152 
1153     /* We need to allocate more memory after the brk... Note that
1154      * we don't use MAP_FIXED because that will map over the top of
1155      * any existing mapping (like the one with the host libc or qemu
1156      * itself); instead we treat "mapped but at wrong address" as
1157      * a failure and unmap again.
1158      */
1159     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1160     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1161                                         PROT_READ|PROT_WRITE,
1162                                         MAP_ANON|MAP_PRIVATE, 0, 0));
1163 
1164     if (mapped_addr == brk_page) {
1165         /* Heap contents are initialized to zero, as for anonymous
1166          * mapped pages.  Technically the new pages are already
1167          * initialized to zero since they *are* anonymous mapped
1168          * pages, however we have to take care with the contents that
1169          * come from the remaining part of the previous page: it may
1170          * contains garbage data due to a previous heap usage (grown
1171          * then shrunken).  */
1172         memset(g2h(target_brk), 0, brk_page - target_brk);
1173 
1174         target_brk = new_brk;
1175         brk_page = HOST_PAGE_ALIGN(target_brk);
1176         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1177             target_brk);
1178         return target_brk;
1179     } else if (mapped_addr != -1) {
1180         /* Mapped but at wrong address, meaning there wasn't actually
1181          * enough space for this brk.
1182          */
1183         target_munmap(mapped_addr, new_alloc_size);
1184         mapped_addr = -1;
1185         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1186     }
1187     else {
1188         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1189     }
1190 
1191 #if defined(TARGET_ALPHA)
1192     /* We (partially) emulate OSF/1 on Alpha, which requires we
1193        return a proper errno, not an unchanged brk value.  */
1194     return -TARGET_ENOMEM;
1195 #endif
1196     /* For everything else, return the previous break. */
1197     return target_brk;
1198 }
1199 
1200 static inline abi_long copy_from_user_fdset(fd_set *fds,
1201                                             abi_ulong target_fds_addr,
1202                                             int n)
1203 {
1204     int i, nw, j, k;
1205     abi_ulong b, *target_fds;
1206 
1207     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1208     if (!(target_fds = lock_user(VERIFY_READ,
1209                                  target_fds_addr,
1210                                  sizeof(abi_ulong) * nw,
1211                                  1)))
1212         return -TARGET_EFAULT;
1213 
1214     FD_ZERO(fds);
1215     k = 0;
1216     for (i = 0; i < nw; i++) {
1217         /* grab the abi_ulong */
1218         __get_user(b, &target_fds[i]);
1219         for (j = 0; j < TARGET_ABI_BITS; j++) {
1220             /* check the bit inside the abi_ulong */
1221             if ((b >> j) & 1)
1222                 FD_SET(k, fds);
1223             k++;
1224         }
1225     }
1226 
1227     unlock_user(target_fds, target_fds_addr, 0);
1228 
1229     return 0;
1230 }
1231 
1232 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1233                                                  abi_ulong target_fds_addr,
1234                                                  int n)
1235 {
1236     if (target_fds_addr) {
1237         if (copy_from_user_fdset(fds, target_fds_addr, n))
1238             return -TARGET_EFAULT;
1239         *fds_ptr = fds;
1240     } else {
1241         *fds_ptr = NULL;
1242     }
1243     return 0;
1244 }
1245 
1246 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1247                                           const fd_set *fds,
1248                                           int n)
1249 {
1250     int i, nw, j, k;
1251     abi_long v;
1252     abi_ulong *target_fds;
1253 
1254     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1255     if (!(target_fds = lock_user(VERIFY_WRITE,
1256                                  target_fds_addr,
1257                                  sizeof(abi_ulong) * nw,
1258                                  0)))
1259         return -TARGET_EFAULT;
1260 
1261     k = 0;
1262     for (i = 0; i < nw; i++) {
1263         v = 0;
1264         for (j = 0; j < TARGET_ABI_BITS; j++) {
1265             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1266             k++;
1267         }
1268         __put_user(v, &target_fds[i]);
1269     }
1270 
1271     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1272 
1273     return 0;
1274 }
1275 
1276 #if defined(__alpha__)
1277 #define HOST_HZ 1024
1278 #else
1279 #define HOST_HZ 100
1280 #endif
1281 
1282 static inline abi_long host_to_target_clock_t(long ticks)
1283 {
1284 #if HOST_HZ == TARGET_HZ
1285     return ticks;
1286 #else
1287     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1288 #endif
1289 }
1290 
1291 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1292                                              const struct rusage *rusage)
1293 {
1294     struct target_rusage *target_rusage;
1295 
1296     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1297         return -TARGET_EFAULT;
1298     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1299     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1300     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1301     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1302     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1303     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1304     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1305     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1306     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1307     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1308     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1309     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1310     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1311     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1312     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1313     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1314     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1315     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1316     unlock_user_struct(target_rusage, target_addr, 1);
1317 
1318     return 0;
1319 }
1320 
1321 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1322 {
1323     abi_ulong target_rlim_swap;
1324     rlim_t result;
1325 
1326     target_rlim_swap = tswapal(target_rlim);
1327     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1328         return RLIM_INFINITY;
1329 
1330     result = target_rlim_swap;
1331     if (target_rlim_swap != (rlim_t)result)
1332         return RLIM_INFINITY;
1333 
1334     return result;
1335 }
1336 
1337 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1338 {
1339     abi_ulong target_rlim_swap;
1340     abi_ulong result;
1341 
1342     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1343         target_rlim_swap = TARGET_RLIM_INFINITY;
1344     else
1345         target_rlim_swap = rlim;
1346     result = tswapal(target_rlim_swap);
1347 
1348     return result;
1349 }
1350 
1351 static inline int target_to_host_resource(int code)
1352 {
1353     switch (code) {
1354     case TARGET_RLIMIT_AS:
1355         return RLIMIT_AS;
1356     case TARGET_RLIMIT_CORE:
1357         return RLIMIT_CORE;
1358     case TARGET_RLIMIT_CPU:
1359         return RLIMIT_CPU;
1360     case TARGET_RLIMIT_DATA:
1361         return RLIMIT_DATA;
1362     case TARGET_RLIMIT_FSIZE:
1363         return RLIMIT_FSIZE;
1364     case TARGET_RLIMIT_LOCKS:
1365         return RLIMIT_LOCKS;
1366     case TARGET_RLIMIT_MEMLOCK:
1367         return RLIMIT_MEMLOCK;
1368     case TARGET_RLIMIT_MSGQUEUE:
1369         return RLIMIT_MSGQUEUE;
1370     case TARGET_RLIMIT_NICE:
1371         return RLIMIT_NICE;
1372     case TARGET_RLIMIT_NOFILE:
1373         return RLIMIT_NOFILE;
1374     case TARGET_RLIMIT_NPROC:
1375         return RLIMIT_NPROC;
1376     case TARGET_RLIMIT_RSS:
1377         return RLIMIT_RSS;
1378     case TARGET_RLIMIT_RTPRIO:
1379         return RLIMIT_RTPRIO;
1380     case TARGET_RLIMIT_SIGPENDING:
1381         return RLIMIT_SIGPENDING;
1382     case TARGET_RLIMIT_STACK:
1383         return RLIMIT_STACK;
1384     default:
1385         return code;
1386     }
1387 }
1388 
1389 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1390                                               abi_ulong target_tv_addr)
1391 {
1392     struct target_timeval *target_tv;
1393 
1394     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1395         return -TARGET_EFAULT;
1396 
1397     __get_user(tv->tv_sec, &target_tv->tv_sec);
1398     __get_user(tv->tv_usec, &target_tv->tv_usec);
1399 
1400     unlock_user_struct(target_tv, target_tv_addr, 0);
1401 
1402     return 0;
1403 }
1404 
1405 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1406                                             const struct timeval *tv)
1407 {
1408     struct target_timeval *target_tv;
1409 
1410     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1411         return -TARGET_EFAULT;
1412 
1413     __put_user(tv->tv_sec, &target_tv->tv_sec);
1414     __put_user(tv->tv_usec, &target_tv->tv_usec);
1415 
1416     unlock_user_struct(target_tv, target_tv_addr, 1);
1417 
1418     return 0;
1419 }
1420 
1421 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1422                                                abi_ulong target_tz_addr)
1423 {
1424     struct target_timezone *target_tz;
1425 
1426     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1427         return -TARGET_EFAULT;
1428     }
1429 
1430     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1431     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1432 
1433     unlock_user_struct(target_tz, target_tz_addr, 0);
1434 
1435     return 0;
1436 }
1437 
1438 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1439 #include <mqueue.h>
1440 
1441 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1442                                               abi_ulong target_mq_attr_addr)
1443 {
1444     struct target_mq_attr *target_mq_attr;
1445 
1446     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1447                           target_mq_attr_addr, 1))
1448         return -TARGET_EFAULT;
1449 
1450     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1451     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1452     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1453     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1454 
1455     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1456 
1457     return 0;
1458 }
1459 
1460 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1461                                             const struct mq_attr *attr)
1462 {
1463     struct target_mq_attr *target_mq_attr;
1464 
1465     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1466                           target_mq_attr_addr, 0))
1467         return -TARGET_EFAULT;
1468 
1469     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1470     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1471     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1472     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1473 
1474     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1475 
1476     return 0;
1477 }
1478 #endif
1479 
1480 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1481 /* do_select() must return target values and target errnos. */
1482 static abi_long do_select(int n,
1483                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1484                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1485 {
1486     fd_set rfds, wfds, efds;
1487     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1488     struct timeval tv;
1489     struct timespec ts, *ts_ptr;
1490     abi_long ret;
1491 
1492     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1493     if (ret) {
1494         return ret;
1495     }
1496     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1497     if (ret) {
1498         return ret;
1499     }
1500     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1501     if (ret) {
1502         return ret;
1503     }
1504 
1505     if (target_tv_addr) {
1506         if (copy_from_user_timeval(&tv, target_tv_addr))
1507             return -TARGET_EFAULT;
1508         ts.tv_sec = tv.tv_sec;
1509         ts.tv_nsec = tv.tv_usec * 1000;
1510         ts_ptr = &ts;
1511     } else {
1512         ts_ptr = NULL;
1513     }
1514 
1515     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1516                                   ts_ptr, NULL));
1517 
1518     if (!is_error(ret)) {
1519         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1520             return -TARGET_EFAULT;
1521         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1522             return -TARGET_EFAULT;
1523         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1524             return -TARGET_EFAULT;
1525 
1526         if (target_tv_addr) {
1527             tv.tv_sec = ts.tv_sec;
1528             tv.tv_usec = ts.tv_nsec / 1000;
1529             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1530                 return -TARGET_EFAULT;
1531             }
1532         }
1533     }
1534 
1535     return ret;
1536 }
1537 
1538 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1539 static abi_long do_old_select(abi_ulong arg1)
1540 {
1541     struct target_sel_arg_struct *sel;
1542     abi_ulong inp, outp, exp, tvp;
1543     long nsel;
1544 
1545     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1546         return -TARGET_EFAULT;
1547     }
1548 
1549     nsel = tswapal(sel->n);
1550     inp = tswapal(sel->inp);
1551     outp = tswapal(sel->outp);
1552     exp = tswapal(sel->exp);
1553     tvp = tswapal(sel->tvp);
1554 
1555     unlock_user_struct(sel, arg1, 0);
1556 
1557     return do_select(nsel, inp, outp, exp, tvp);
1558 }
1559 #endif
1560 #endif
1561 
1562 static abi_long do_pipe2(int host_pipe[], int flags)
1563 {
1564 #ifdef CONFIG_PIPE2
1565     return pipe2(host_pipe, flags);
1566 #else
1567     return -ENOSYS;
1568 #endif
1569 }
1570 
1571 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1572                         int flags, int is_pipe2)
1573 {
1574     int host_pipe[2];
1575     abi_long ret;
1576     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1577 
1578     if (is_error(ret))
1579         return get_errno(ret);
1580 
1581     /* Several targets have special calling conventions for the original
1582        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1583     if (!is_pipe2) {
1584 #if defined(TARGET_ALPHA)
1585         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1586         return host_pipe[0];
1587 #elif defined(TARGET_MIPS)
1588         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1589         return host_pipe[0];
1590 #elif defined(TARGET_SH4)
1591         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1592         return host_pipe[0];
1593 #elif defined(TARGET_SPARC)
1594         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1595         return host_pipe[0];
1596 #endif
1597     }
1598 
1599     if (put_user_s32(host_pipe[0], pipedes)
1600         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1601         return -TARGET_EFAULT;
1602     return get_errno(ret);
1603 }
1604 
1605 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1606                                               abi_ulong target_addr,
1607                                               socklen_t len)
1608 {
1609     struct target_ip_mreqn *target_smreqn;
1610 
1611     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1612     if (!target_smreqn)
1613         return -TARGET_EFAULT;
1614     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1615     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1616     if (len == sizeof(struct target_ip_mreqn))
1617         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1618     unlock_user(target_smreqn, target_addr, 0);
1619 
1620     return 0;
1621 }
1622 
1623 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1624                                                abi_ulong target_addr,
1625                                                socklen_t len)
1626 {
1627     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1628     sa_family_t sa_family;
1629     struct target_sockaddr *target_saddr;
1630 
1631     if (fd_trans_target_to_host_addr(fd)) {
1632         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1633     }
1634 
1635     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1636     if (!target_saddr)
1637         return -TARGET_EFAULT;
1638 
1639     sa_family = tswap16(target_saddr->sa_family);
1640 
1641     /* Oops. The caller might send a incomplete sun_path; sun_path
1642      * must be terminated by \0 (see the manual page), but
1643      * unfortunately it is quite common to specify sockaddr_un
1644      * length as "strlen(x->sun_path)" while it should be
1645      * "strlen(...) + 1". We'll fix that here if needed.
1646      * Linux kernel has a similar feature.
1647      */
1648 
1649     if (sa_family == AF_UNIX) {
1650         if (len < unix_maxlen && len > 0) {
1651             char *cp = (char*)target_saddr;
1652 
1653             if ( cp[len-1] && !cp[len] )
1654                 len++;
1655         }
1656         if (len > unix_maxlen)
1657             len = unix_maxlen;
1658     }
1659 
1660     memcpy(addr, target_saddr, len);
1661     addr->sa_family = sa_family;
1662     if (sa_family == AF_NETLINK) {
1663         struct sockaddr_nl *nladdr;
1664 
1665         nladdr = (struct sockaddr_nl *)addr;
1666         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1667         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1668     } else if (sa_family == AF_PACKET) {
1669 	struct target_sockaddr_ll *lladdr;
1670 
1671 	lladdr = (struct target_sockaddr_ll *)addr;
1672 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1673 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1674     }
1675     unlock_user(target_saddr, target_addr, 0);
1676 
1677     return 0;
1678 }
1679 
1680 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1681                                                struct sockaddr *addr,
1682                                                socklen_t len)
1683 {
1684     struct target_sockaddr *target_saddr;
1685 
1686     if (len == 0) {
1687         return 0;
1688     }
1689     assert(addr);
1690 
1691     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1692     if (!target_saddr)
1693         return -TARGET_EFAULT;
1694     memcpy(target_saddr, addr, len);
1695     if (len >= offsetof(struct target_sockaddr, sa_family) +
1696         sizeof(target_saddr->sa_family)) {
1697         target_saddr->sa_family = tswap16(addr->sa_family);
1698     }
1699     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1700         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1701         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1702         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1703     } else if (addr->sa_family == AF_PACKET) {
1704         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1705         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1706         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1707     } else if (addr->sa_family == AF_INET6 &&
1708                len >= sizeof(struct target_sockaddr_in6)) {
1709         struct target_sockaddr_in6 *target_in6 =
1710                (struct target_sockaddr_in6 *)target_saddr;
1711         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1712     }
1713     unlock_user(target_saddr, target_addr, len);
1714 
1715     return 0;
1716 }
1717 
1718 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1719                                            struct target_msghdr *target_msgh)
1720 {
1721     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1722     abi_long msg_controllen;
1723     abi_ulong target_cmsg_addr;
1724     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1725     socklen_t space = 0;
1726 
1727     msg_controllen = tswapal(target_msgh->msg_controllen);
1728     if (msg_controllen < sizeof (struct target_cmsghdr))
1729         goto the_end;
1730     target_cmsg_addr = tswapal(target_msgh->msg_control);
1731     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1732     target_cmsg_start = target_cmsg;
1733     if (!target_cmsg)
1734         return -TARGET_EFAULT;
1735 
1736     while (cmsg && target_cmsg) {
1737         void *data = CMSG_DATA(cmsg);
1738         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1739 
1740         int len = tswapal(target_cmsg->cmsg_len)
1741             - sizeof(struct target_cmsghdr);
1742 
1743         space += CMSG_SPACE(len);
1744         if (space > msgh->msg_controllen) {
1745             space -= CMSG_SPACE(len);
1746             /* This is a QEMU bug, since we allocated the payload
1747              * area ourselves (unlike overflow in host-to-target
1748              * conversion, which is just the guest giving us a buffer
1749              * that's too small). It can't happen for the payload types
1750              * we currently support; if it becomes an issue in future
1751              * we would need to improve our allocation strategy to
1752              * something more intelligent than "twice the size of the
1753              * target buffer we're reading from".
1754              */
1755             gemu_log("Host cmsg overflow\n");
1756             break;
1757         }
1758 
1759         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1760             cmsg->cmsg_level = SOL_SOCKET;
1761         } else {
1762             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1763         }
1764         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1765         cmsg->cmsg_len = CMSG_LEN(len);
1766 
1767         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1768             int *fd = (int *)data;
1769             int *target_fd = (int *)target_data;
1770             int i, numfds = len / sizeof(int);
1771 
1772             for (i = 0; i < numfds; i++) {
1773                 __get_user(fd[i], target_fd + i);
1774             }
1775         } else if (cmsg->cmsg_level == SOL_SOCKET
1776                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1777             struct ucred *cred = (struct ucred *)data;
1778             struct target_ucred *target_cred =
1779                 (struct target_ucred *)target_data;
1780 
1781             __get_user(cred->pid, &target_cred->pid);
1782             __get_user(cred->uid, &target_cred->uid);
1783             __get_user(cred->gid, &target_cred->gid);
1784         } else {
1785             gemu_log("Unsupported ancillary data: %d/%d\n",
1786                                         cmsg->cmsg_level, cmsg->cmsg_type);
1787             memcpy(data, target_data, len);
1788         }
1789 
1790         cmsg = CMSG_NXTHDR(msgh, cmsg);
1791         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1792                                          target_cmsg_start);
1793     }
1794     unlock_user(target_cmsg, target_cmsg_addr, 0);
1795  the_end:
1796     msgh->msg_controllen = space;
1797     return 0;
1798 }
1799 
1800 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1801                                            struct msghdr *msgh)
1802 {
1803     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1804     abi_long msg_controllen;
1805     abi_ulong target_cmsg_addr;
1806     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1807     socklen_t space = 0;
1808 
1809     msg_controllen = tswapal(target_msgh->msg_controllen);
1810     if (msg_controllen < sizeof (struct target_cmsghdr))
1811         goto the_end;
1812     target_cmsg_addr = tswapal(target_msgh->msg_control);
1813     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1814     target_cmsg_start = target_cmsg;
1815     if (!target_cmsg)
1816         return -TARGET_EFAULT;
1817 
1818     while (cmsg && target_cmsg) {
1819         void *data = CMSG_DATA(cmsg);
1820         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1821 
1822         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1823         int tgt_len, tgt_space;
1824 
1825         /* We never copy a half-header but may copy half-data;
1826          * this is Linux's behaviour in put_cmsg(). Note that
1827          * truncation here is a guest problem (which we report
1828          * to the guest via the CTRUNC bit), unlike truncation
1829          * in target_to_host_cmsg, which is a QEMU bug.
1830          */
1831         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1832             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1833             break;
1834         }
1835 
1836         if (cmsg->cmsg_level == SOL_SOCKET) {
1837             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1838         } else {
1839             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1840         }
1841         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1842 
1843         /* Payload types which need a different size of payload on
1844          * the target must adjust tgt_len here.
1845          */
1846         tgt_len = len;
1847         switch (cmsg->cmsg_level) {
1848         case SOL_SOCKET:
1849             switch (cmsg->cmsg_type) {
1850             case SO_TIMESTAMP:
1851                 tgt_len = sizeof(struct target_timeval);
1852                 break;
1853             default:
1854                 break;
1855             }
1856             break;
1857         default:
1858             break;
1859         }
1860 
1861         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1862             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1863             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1864         }
1865 
1866         /* We must now copy-and-convert len bytes of payload
1867          * into tgt_len bytes of destination space. Bear in mind
1868          * that in both source and destination we may be dealing
1869          * with a truncated value!
1870          */
1871         switch (cmsg->cmsg_level) {
1872         case SOL_SOCKET:
1873             switch (cmsg->cmsg_type) {
1874             case SCM_RIGHTS:
1875             {
1876                 int *fd = (int *)data;
1877                 int *target_fd = (int *)target_data;
1878                 int i, numfds = tgt_len / sizeof(int);
1879 
1880                 for (i = 0; i < numfds; i++) {
1881                     __put_user(fd[i], target_fd + i);
1882                 }
1883                 break;
1884             }
1885             case SO_TIMESTAMP:
1886             {
1887                 struct timeval *tv = (struct timeval *)data;
1888                 struct target_timeval *target_tv =
1889                     (struct target_timeval *)target_data;
1890 
1891                 if (len != sizeof(struct timeval) ||
1892                     tgt_len != sizeof(struct target_timeval)) {
1893                     goto unimplemented;
1894                 }
1895 
1896                 /* copy struct timeval to target */
1897                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1898                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1899                 break;
1900             }
1901             case SCM_CREDENTIALS:
1902             {
1903                 struct ucred *cred = (struct ucred *)data;
1904                 struct target_ucred *target_cred =
1905                     (struct target_ucred *)target_data;
1906 
1907                 __put_user(cred->pid, &target_cred->pid);
1908                 __put_user(cred->uid, &target_cred->uid);
1909                 __put_user(cred->gid, &target_cred->gid);
1910                 break;
1911             }
1912             default:
1913                 goto unimplemented;
1914             }
1915             break;
1916 
1917         case SOL_IP:
1918             switch (cmsg->cmsg_type) {
1919             case IP_TTL:
1920             {
1921                 uint32_t *v = (uint32_t *)data;
1922                 uint32_t *t_int = (uint32_t *)target_data;
1923 
1924                 if (len != sizeof(uint32_t) ||
1925                     tgt_len != sizeof(uint32_t)) {
1926                     goto unimplemented;
1927                 }
1928                 __put_user(*v, t_int);
1929                 break;
1930             }
1931             case IP_RECVERR:
1932             {
1933                 struct errhdr_t {
1934                    struct sock_extended_err ee;
1935                    struct sockaddr_in offender;
1936                 };
1937                 struct errhdr_t *errh = (struct errhdr_t *)data;
1938                 struct errhdr_t *target_errh =
1939                     (struct errhdr_t *)target_data;
1940 
1941                 if (len != sizeof(struct errhdr_t) ||
1942                     tgt_len != sizeof(struct errhdr_t)) {
1943                     goto unimplemented;
1944                 }
1945                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1946                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1947                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1948                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1949                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1950                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1951                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1952                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1953                     (void *) &errh->offender, sizeof(errh->offender));
1954                 break;
1955             }
1956             default:
1957                 goto unimplemented;
1958             }
1959             break;
1960 
1961         case SOL_IPV6:
1962             switch (cmsg->cmsg_type) {
1963             case IPV6_HOPLIMIT:
1964             {
1965                 uint32_t *v = (uint32_t *)data;
1966                 uint32_t *t_int = (uint32_t *)target_data;
1967 
1968                 if (len != sizeof(uint32_t) ||
1969                     tgt_len != sizeof(uint32_t)) {
1970                     goto unimplemented;
1971                 }
1972                 __put_user(*v, t_int);
1973                 break;
1974             }
1975             case IPV6_RECVERR:
1976             {
1977                 struct errhdr6_t {
1978                    struct sock_extended_err ee;
1979                    struct sockaddr_in6 offender;
1980                 };
1981                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1982                 struct errhdr6_t *target_errh =
1983                     (struct errhdr6_t *)target_data;
1984 
1985                 if (len != sizeof(struct errhdr6_t) ||
1986                     tgt_len != sizeof(struct errhdr6_t)) {
1987                     goto unimplemented;
1988                 }
1989                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1990                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1991                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1992                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1993                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1994                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1995                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1996                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1997                     (void *) &errh->offender, sizeof(errh->offender));
1998                 break;
1999             }
2000             default:
2001                 goto unimplemented;
2002             }
2003             break;
2004 
2005         default:
2006         unimplemented:
2007             gemu_log("Unsupported ancillary data: %d/%d\n",
2008                                         cmsg->cmsg_level, cmsg->cmsg_type);
2009             memcpy(target_data, data, MIN(len, tgt_len));
2010             if (tgt_len > len) {
2011                 memset(target_data + len, 0, tgt_len - len);
2012             }
2013         }
2014 
2015         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2016         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2017         if (msg_controllen < tgt_space) {
2018             tgt_space = msg_controllen;
2019         }
2020         msg_controllen -= tgt_space;
2021         space += tgt_space;
2022         cmsg = CMSG_NXTHDR(msgh, cmsg);
2023         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2024                                          target_cmsg_start);
2025     }
2026     unlock_user(target_cmsg, target_cmsg_addr, space);
2027  the_end:
2028     target_msgh->msg_controllen = tswapal(space);
2029     return 0;
2030 }
2031 
2032 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2033 {
2034     nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2035     nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2036     nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2037     nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2038     nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
2039 }
2040 
2041 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2042                                               size_t len,
2043                                               abi_long (*host_to_target_nlmsg)
2044                                                        (struct nlmsghdr *))
2045 {
2046     uint32_t nlmsg_len;
2047     abi_long ret;
2048 
2049     while (len > sizeof(struct nlmsghdr)) {
2050 
2051         nlmsg_len = nlh->nlmsg_len;
2052         if (nlmsg_len < sizeof(struct nlmsghdr) ||
2053             nlmsg_len > len) {
2054             break;
2055         }
2056 
2057         switch (nlh->nlmsg_type) {
2058         case NLMSG_DONE:
2059             tswap_nlmsghdr(nlh);
2060             return 0;
2061         case NLMSG_NOOP:
2062             break;
2063         case NLMSG_ERROR:
2064         {
2065             struct nlmsgerr *e = NLMSG_DATA(nlh);
2066             e->error = tswap32(e->error);
2067             tswap_nlmsghdr(&e->msg);
2068             tswap_nlmsghdr(nlh);
2069             return 0;
2070         }
2071         default:
2072             ret = host_to_target_nlmsg(nlh);
2073             if (ret < 0) {
2074                 tswap_nlmsghdr(nlh);
2075                 return ret;
2076             }
2077             break;
2078         }
2079         tswap_nlmsghdr(nlh);
2080         len -= NLMSG_ALIGN(nlmsg_len);
2081         nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2082     }
2083     return 0;
2084 }
2085 
2086 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2087                                               size_t len,
2088                                               abi_long (*target_to_host_nlmsg)
2089                                                        (struct nlmsghdr *))
2090 {
2091     int ret;
2092 
2093     while (len > sizeof(struct nlmsghdr)) {
2094         if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2095             tswap32(nlh->nlmsg_len) > len) {
2096             break;
2097         }
2098         tswap_nlmsghdr(nlh);
2099         switch (nlh->nlmsg_type) {
2100         case NLMSG_DONE:
2101             return 0;
2102         case NLMSG_NOOP:
2103             break;
2104         case NLMSG_ERROR:
2105         {
2106             struct nlmsgerr *e = NLMSG_DATA(nlh);
2107             e->error = tswap32(e->error);
2108             tswap_nlmsghdr(&e->msg);
2109             return 0;
2110         }
2111         default:
2112             ret = target_to_host_nlmsg(nlh);
2113             if (ret < 0) {
2114                 return ret;
2115             }
2116         }
2117         len -= NLMSG_ALIGN(nlh->nlmsg_len);
2118         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2119     }
2120     return 0;
2121 }
2122 
2123 #ifdef CONFIG_RTNETLINK
2124 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2125                                                size_t len, void *context,
2126                                                abi_long (*host_to_target_nlattr)
2127                                                         (struct nlattr *,
2128                                                          void *context))
2129 {
2130     unsigned short nla_len;
2131     abi_long ret;
2132 
2133     while (len > sizeof(struct nlattr)) {
2134         nla_len = nlattr->nla_len;
2135         if (nla_len < sizeof(struct nlattr) ||
2136             nla_len > len) {
2137             break;
2138         }
2139         ret = host_to_target_nlattr(nlattr, context);
2140         nlattr->nla_len = tswap16(nlattr->nla_len);
2141         nlattr->nla_type = tswap16(nlattr->nla_type);
2142         if (ret < 0) {
2143             return ret;
2144         }
2145         len -= NLA_ALIGN(nla_len);
2146         nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2147     }
2148     return 0;
2149 }
2150 
2151 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2152                                                size_t len,
2153                                                abi_long (*host_to_target_rtattr)
2154                                                         (struct rtattr *))
2155 {
2156     unsigned short rta_len;
2157     abi_long ret;
2158 
2159     while (len > sizeof(struct rtattr)) {
2160         rta_len = rtattr->rta_len;
2161         if (rta_len < sizeof(struct rtattr) ||
2162             rta_len > len) {
2163             break;
2164         }
2165         ret = host_to_target_rtattr(rtattr);
2166         rtattr->rta_len = tswap16(rtattr->rta_len);
2167         rtattr->rta_type = tswap16(rtattr->rta_type);
2168         if (ret < 0) {
2169             return ret;
2170         }
2171         len -= RTA_ALIGN(rta_len);
2172         rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2173     }
2174     return 0;
2175 }
2176 
2177 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2178 
2179 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2180                                                   void *context)
2181 {
2182     uint16_t *u16;
2183     uint32_t *u32;
2184     uint64_t *u64;
2185 
2186     switch (nlattr->nla_type) {
2187     /* no data */
2188     case QEMU_IFLA_BR_FDB_FLUSH:
2189         break;
2190     /* binary */
2191     case QEMU_IFLA_BR_GROUP_ADDR:
2192         break;
2193     /* uint8_t */
2194     case QEMU_IFLA_BR_VLAN_FILTERING:
2195     case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2196     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2197     case QEMU_IFLA_BR_MCAST_ROUTER:
2198     case QEMU_IFLA_BR_MCAST_SNOOPING:
2199     case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2200     case QEMU_IFLA_BR_MCAST_QUERIER:
2201     case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2202     case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2203     case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2204     case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
2205     case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
2206     case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
2207     case QEMU_IFLA_BR_MCAST_MLD_VERSION:
2208         break;
2209     /* uint16_t */
2210     case QEMU_IFLA_BR_PRIORITY:
2211     case QEMU_IFLA_BR_VLAN_PROTOCOL:
2212     case QEMU_IFLA_BR_GROUP_FWD_MASK:
2213     case QEMU_IFLA_BR_ROOT_PORT:
2214     case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2215         u16 = NLA_DATA(nlattr);
2216         *u16 = tswap16(*u16);
2217         break;
2218     /* uint32_t */
2219     case QEMU_IFLA_BR_FORWARD_DELAY:
2220     case QEMU_IFLA_BR_HELLO_TIME:
2221     case QEMU_IFLA_BR_MAX_AGE:
2222     case QEMU_IFLA_BR_AGEING_TIME:
2223     case QEMU_IFLA_BR_STP_STATE:
2224     case QEMU_IFLA_BR_ROOT_PATH_COST:
2225     case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2226     case QEMU_IFLA_BR_MCAST_HASH_MAX:
2227     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2228     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2229         u32 = NLA_DATA(nlattr);
2230         *u32 = tswap32(*u32);
2231         break;
2232     /* uint64_t */
2233     case QEMU_IFLA_BR_HELLO_TIMER:
2234     case QEMU_IFLA_BR_TCN_TIMER:
2235     case QEMU_IFLA_BR_GC_TIMER:
2236     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2237     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2238     case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2239     case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2240     case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2241     case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2242     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2243         u64 = NLA_DATA(nlattr);
2244         *u64 = tswap64(*u64);
2245         break;
2246     /* ifla_bridge_id: uin8_t[] */
2247     case QEMU_IFLA_BR_ROOT_ID:
2248     case QEMU_IFLA_BR_BRIDGE_ID:
2249         break;
2250     default:
2251         gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2252         break;
2253     }
2254     return 0;
2255 }
2256 
2257 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2258                                                         void *context)
2259 {
2260     uint16_t *u16;
2261     uint32_t *u32;
2262     uint64_t *u64;
2263 
2264     switch (nlattr->nla_type) {
2265     /* uint8_t */
2266     case QEMU_IFLA_BRPORT_STATE:
2267     case QEMU_IFLA_BRPORT_MODE:
2268     case QEMU_IFLA_BRPORT_GUARD:
2269     case QEMU_IFLA_BRPORT_PROTECT:
2270     case QEMU_IFLA_BRPORT_FAST_LEAVE:
2271     case QEMU_IFLA_BRPORT_LEARNING:
2272     case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2273     case QEMU_IFLA_BRPORT_PROXYARP:
2274     case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2275     case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2276     case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2277     case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2278     case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2279     case QEMU_IFLA_BRPORT_MCAST_FLOOD:
2280     case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
2281     case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
2282     case QEMU_IFLA_BRPORT_BCAST_FLOOD:
2283     case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
2284         break;
2285     /* uint16_t */
2286     case QEMU_IFLA_BRPORT_PRIORITY:
2287     case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2288     case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2289     case QEMU_IFLA_BRPORT_ID:
2290     case QEMU_IFLA_BRPORT_NO:
2291     case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
2292         u16 = NLA_DATA(nlattr);
2293         *u16 = tswap16(*u16);
2294         break;
2295     /* uin32_t */
2296     case QEMU_IFLA_BRPORT_COST:
2297         u32 = NLA_DATA(nlattr);
2298         *u32 = tswap32(*u32);
2299         break;
2300     /* uint64_t */
2301     case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2302     case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2303     case QEMU_IFLA_BRPORT_HOLD_TIMER:
2304         u64 = NLA_DATA(nlattr);
2305         *u64 = tswap64(*u64);
2306         break;
2307     /* ifla_bridge_id: uint8_t[] */
2308     case QEMU_IFLA_BRPORT_ROOT_ID:
2309     case QEMU_IFLA_BRPORT_BRIDGE_ID:
2310         break;
2311     default:
2312         gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2313         break;
2314     }
2315     return 0;
2316 }
2317 
2318 struct linkinfo_context {
2319     int len;
2320     char *name;
2321     int slave_len;
2322     char *slave_name;
2323 };
2324 
2325 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2326                                                     void *context)
2327 {
2328     struct linkinfo_context *li_context = context;
2329 
2330     switch (nlattr->nla_type) {
2331     /* string */
2332     case QEMU_IFLA_INFO_KIND:
2333         li_context->name = NLA_DATA(nlattr);
2334         li_context->len = nlattr->nla_len - NLA_HDRLEN;
2335         break;
2336     case QEMU_IFLA_INFO_SLAVE_KIND:
2337         li_context->slave_name = NLA_DATA(nlattr);
2338         li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2339         break;
2340     /* stats */
2341     case QEMU_IFLA_INFO_XSTATS:
2342         /* FIXME: only used by CAN */
2343         break;
2344     /* nested */
2345     case QEMU_IFLA_INFO_DATA:
2346         if (strncmp(li_context->name, "bridge",
2347                     li_context->len) == 0) {
2348             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2349                                                   nlattr->nla_len,
2350                                                   NULL,
2351                                              host_to_target_data_bridge_nlattr);
2352         } else {
2353             gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2354         }
2355         break;
2356     case QEMU_IFLA_INFO_SLAVE_DATA:
2357         if (strncmp(li_context->slave_name, "bridge",
2358                     li_context->slave_len) == 0) {
2359             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2360                                                   nlattr->nla_len,
2361                                                   NULL,
2362                                        host_to_target_slave_data_bridge_nlattr);
2363         } else {
2364             gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2365                      li_context->slave_name);
2366         }
2367         break;
2368     default:
2369         gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2370         break;
2371     }
2372 
2373     return 0;
2374 }
2375 
2376 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2377                                                 void *context)
2378 {
2379     uint32_t *u32;
2380     int i;
2381 
2382     switch (nlattr->nla_type) {
2383     case QEMU_IFLA_INET_CONF:
2384         u32 = NLA_DATA(nlattr);
2385         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2386              i++) {
2387             u32[i] = tswap32(u32[i]);
2388         }
2389         break;
2390     default:
2391         gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2392     }
2393     return 0;
2394 }
2395 
2396 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2397                                                 void *context)
2398 {
2399     uint32_t *u32;
2400     uint64_t *u64;
2401     struct ifla_cacheinfo *ci;
2402     int i;
2403 
2404     switch (nlattr->nla_type) {
2405     /* binaries */
2406     case QEMU_IFLA_INET6_TOKEN:
2407         break;
2408     /* uint8_t */
2409     case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2410         break;
2411     /* uint32_t */
2412     case QEMU_IFLA_INET6_FLAGS:
2413         u32 = NLA_DATA(nlattr);
2414         *u32 = tswap32(*u32);
2415         break;
2416     /* uint32_t[] */
2417     case QEMU_IFLA_INET6_CONF:
2418         u32 = NLA_DATA(nlattr);
2419         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2420              i++) {
2421             u32[i] = tswap32(u32[i]);
2422         }
2423         break;
2424     /* ifla_cacheinfo */
2425     case QEMU_IFLA_INET6_CACHEINFO:
2426         ci = NLA_DATA(nlattr);
2427         ci->max_reasm_len = tswap32(ci->max_reasm_len);
2428         ci->tstamp = tswap32(ci->tstamp);
2429         ci->reachable_time = tswap32(ci->reachable_time);
2430         ci->retrans_time = tswap32(ci->retrans_time);
2431         break;
2432     /* uint64_t[] */
2433     case QEMU_IFLA_INET6_STATS:
2434     case QEMU_IFLA_INET6_ICMP6STATS:
2435         u64 = NLA_DATA(nlattr);
2436         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2437              i++) {
2438             u64[i] = tswap64(u64[i]);
2439         }
2440         break;
2441     default:
2442         gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2443     }
2444     return 0;
2445 }
2446 
2447 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2448                                                     void *context)
2449 {
2450     switch (nlattr->nla_type) {
2451     case AF_INET:
2452         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2453                                               NULL,
2454                                              host_to_target_data_inet_nlattr);
2455     case AF_INET6:
2456         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2457                                               NULL,
2458                                              host_to_target_data_inet6_nlattr);
2459     default:
2460         gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2461         break;
2462     }
2463     return 0;
2464 }
2465 
2466 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2467                                                void *context)
2468 {
2469     uint32_t *u32;
2470 
2471     switch (nlattr->nla_type) {
2472     /* uint8_t */
2473     case QEMU_IFLA_XDP_ATTACHED:
2474         break;
2475     /* uint32_t */
2476     case QEMU_IFLA_XDP_PROG_ID:
2477         u32 = NLA_DATA(nlattr);
2478         *u32 = tswap32(*u32);
2479         break;
2480     default:
2481         gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2482         break;
2483     }
2484     return 0;
2485 }
2486 
2487 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2488 {
2489     uint32_t *u32;
2490     struct rtnl_link_stats *st;
2491     struct rtnl_link_stats64 *st64;
2492     struct rtnl_link_ifmap *map;
2493     struct linkinfo_context li_context;
2494 
2495     switch (rtattr->rta_type) {
2496     /* binary stream */
2497     case QEMU_IFLA_ADDRESS:
2498     case QEMU_IFLA_BROADCAST:
2499     /* string */
2500     case QEMU_IFLA_IFNAME:
2501     case QEMU_IFLA_QDISC:
2502         break;
2503     /* uin8_t */
2504     case QEMU_IFLA_OPERSTATE:
2505     case QEMU_IFLA_LINKMODE:
2506     case QEMU_IFLA_CARRIER:
2507     case QEMU_IFLA_PROTO_DOWN:
2508         break;
2509     /* uint32_t */
2510     case QEMU_IFLA_MTU:
2511     case QEMU_IFLA_LINK:
2512     case QEMU_IFLA_WEIGHT:
2513     case QEMU_IFLA_TXQLEN:
2514     case QEMU_IFLA_CARRIER_CHANGES:
2515     case QEMU_IFLA_NUM_RX_QUEUES:
2516     case QEMU_IFLA_NUM_TX_QUEUES:
2517     case QEMU_IFLA_PROMISCUITY:
2518     case QEMU_IFLA_EXT_MASK:
2519     case QEMU_IFLA_LINK_NETNSID:
2520     case QEMU_IFLA_GROUP:
2521     case QEMU_IFLA_MASTER:
2522     case QEMU_IFLA_NUM_VF:
2523     case QEMU_IFLA_GSO_MAX_SEGS:
2524     case QEMU_IFLA_GSO_MAX_SIZE:
2525     case QEMU_IFLA_CARRIER_UP_COUNT:
2526     case QEMU_IFLA_CARRIER_DOWN_COUNT:
2527         u32 = RTA_DATA(rtattr);
2528         *u32 = tswap32(*u32);
2529         break;
2530     /* struct rtnl_link_stats */
2531     case QEMU_IFLA_STATS:
2532         st = RTA_DATA(rtattr);
2533         st->rx_packets = tswap32(st->rx_packets);
2534         st->tx_packets = tswap32(st->tx_packets);
2535         st->rx_bytes = tswap32(st->rx_bytes);
2536         st->tx_bytes = tswap32(st->tx_bytes);
2537         st->rx_errors = tswap32(st->rx_errors);
2538         st->tx_errors = tswap32(st->tx_errors);
2539         st->rx_dropped = tswap32(st->rx_dropped);
2540         st->tx_dropped = tswap32(st->tx_dropped);
2541         st->multicast = tswap32(st->multicast);
2542         st->collisions = tswap32(st->collisions);
2543 
2544         /* detailed rx_errors: */
2545         st->rx_length_errors = tswap32(st->rx_length_errors);
2546         st->rx_over_errors = tswap32(st->rx_over_errors);
2547         st->rx_crc_errors = tswap32(st->rx_crc_errors);
2548         st->rx_frame_errors = tswap32(st->rx_frame_errors);
2549         st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2550         st->rx_missed_errors = tswap32(st->rx_missed_errors);
2551 
2552         /* detailed tx_errors */
2553         st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2554         st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2555         st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2556         st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2557         st->tx_window_errors = tswap32(st->tx_window_errors);
2558 
2559         /* for cslip etc */
2560         st->rx_compressed = tswap32(st->rx_compressed);
2561         st->tx_compressed = tswap32(st->tx_compressed);
2562         break;
2563     /* struct rtnl_link_stats64 */
2564     case QEMU_IFLA_STATS64:
2565         st64 = RTA_DATA(rtattr);
2566         st64->rx_packets = tswap64(st64->rx_packets);
2567         st64->tx_packets = tswap64(st64->tx_packets);
2568         st64->rx_bytes = tswap64(st64->rx_bytes);
2569         st64->tx_bytes = tswap64(st64->tx_bytes);
2570         st64->rx_errors = tswap64(st64->rx_errors);
2571         st64->tx_errors = tswap64(st64->tx_errors);
2572         st64->rx_dropped = tswap64(st64->rx_dropped);
2573         st64->tx_dropped = tswap64(st64->tx_dropped);
2574         st64->multicast = tswap64(st64->multicast);
2575         st64->collisions = tswap64(st64->collisions);
2576 
2577         /* detailed rx_errors: */
2578         st64->rx_length_errors = tswap64(st64->rx_length_errors);
2579         st64->rx_over_errors = tswap64(st64->rx_over_errors);
2580         st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2581         st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2582         st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2583         st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2584 
2585         /* detailed tx_errors */
2586         st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2587         st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2588         st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2589         st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2590         st64->tx_window_errors = tswap64(st64->tx_window_errors);
2591 
2592         /* for cslip etc */
2593         st64->rx_compressed = tswap64(st64->rx_compressed);
2594         st64->tx_compressed = tswap64(st64->tx_compressed);
2595         break;
2596     /* struct rtnl_link_ifmap */
2597     case QEMU_IFLA_MAP:
2598         map = RTA_DATA(rtattr);
2599         map->mem_start = tswap64(map->mem_start);
2600         map->mem_end = tswap64(map->mem_end);
2601         map->base_addr = tswap64(map->base_addr);
2602         map->irq = tswap16(map->irq);
2603         break;
2604     /* nested */
2605     case QEMU_IFLA_LINKINFO:
2606         memset(&li_context, 0, sizeof(li_context));
2607         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2608                                               &li_context,
2609                                            host_to_target_data_linkinfo_nlattr);
2610     case QEMU_IFLA_AF_SPEC:
2611         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2612                                               NULL,
2613                                              host_to_target_data_spec_nlattr);
2614     case QEMU_IFLA_XDP:
2615         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2616                                               NULL,
2617                                                 host_to_target_data_xdp_nlattr);
2618     default:
2619         gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2620         break;
2621     }
2622     return 0;
2623 }
2624 
2625 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2626 {
2627     uint32_t *u32;
2628     struct ifa_cacheinfo *ci;
2629 
2630     switch (rtattr->rta_type) {
2631     /* binary: depends on family type */
2632     case IFA_ADDRESS:
2633     case IFA_LOCAL:
2634         break;
2635     /* string */
2636     case IFA_LABEL:
2637         break;
2638     /* u32 */
2639     case IFA_FLAGS:
2640     case IFA_BROADCAST:
2641         u32 = RTA_DATA(rtattr);
2642         *u32 = tswap32(*u32);
2643         break;
2644     /* struct ifa_cacheinfo */
2645     case IFA_CACHEINFO:
2646         ci = RTA_DATA(rtattr);
2647         ci->ifa_prefered = tswap32(ci->ifa_prefered);
2648         ci->ifa_valid = tswap32(ci->ifa_valid);
2649         ci->cstamp = tswap32(ci->cstamp);
2650         ci->tstamp = tswap32(ci->tstamp);
2651         break;
2652     default:
2653         gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2654         break;
2655     }
2656     return 0;
2657 }
2658 
2659 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2660 {
2661     uint32_t *u32;
2662     switch (rtattr->rta_type) {
2663     /* binary: depends on family type */
2664     case RTA_GATEWAY:
2665     case RTA_DST:
2666     case RTA_PREFSRC:
2667         break;
2668     /* u32 */
2669     case RTA_PRIORITY:
2670     case RTA_TABLE:
2671     case RTA_OIF:
2672         u32 = RTA_DATA(rtattr);
2673         *u32 = tswap32(*u32);
2674         break;
2675     default:
2676         gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2677         break;
2678     }
2679     return 0;
2680 }
2681 
2682 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2683                                          uint32_t rtattr_len)
2684 {
2685     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2686                                           host_to_target_data_link_rtattr);
2687 }
2688 
2689 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2690                                          uint32_t rtattr_len)
2691 {
2692     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2693                                           host_to_target_data_addr_rtattr);
2694 }
2695 
2696 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2697                                          uint32_t rtattr_len)
2698 {
2699     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2700                                           host_to_target_data_route_rtattr);
2701 }
2702 
2703 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2704 {
2705     uint32_t nlmsg_len;
2706     struct ifinfomsg *ifi;
2707     struct ifaddrmsg *ifa;
2708     struct rtmsg *rtm;
2709 
2710     nlmsg_len = nlh->nlmsg_len;
2711     switch (nlh->nlmsg_type) {
2712     case RTM_NEWLINK:
2713     case RTM_DELLINK:
2714     case RTM_GETLINK:
2715         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2716             ifi = NLMSG_DATA(nlh);
2717             ifi->ifi_type = tswap16(ifi->ifi_type);
2718             ifi->ifi_index = tswap32(ifi->ifi_index);
2719             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2720             ifi->ifi_change = tswap32(ifi->ifi_change);
2721             host_to_target_link_rtattr(IFLA_RTA(ifi),
2722                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2723         }
2724         break;
2725     case RTM_NEWADDR:
2726     case RTM_DELADDR:
2727     case RTM_GETADDR:
2728         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2729             ifa = NLMSG_DATA(nlh);
2730             ifa->ifa_index = tswap32(ifa->ifa_index);
2731             host_to_target_addr_rtattr(IFA_RTA(ifa),
2732                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2733         }
2734         break;
2735     case RTM_NEWROUTE:
2736     case RTM_DELROUTE:
2737     case RTM_GETROUTE:
2738         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2739             rtm = NLMSG_DATA(nlh);
2740             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2741             host_to_target_route_rtattr(RTM_RTA(rtm),
2742                                         nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2743         }
2744         break;
2745     default:
2746         return -TARGET_EINVAL;
2747     }
2748     return 0;
2749 }
2750 
2751 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2752                                                   size_t len)
2753 {
2754     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2755 }
2756 
2757 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2758                                                size_t len,
2759                                                abi_long (*target_to_host_rtattr)
2760                                                         (struct rtattr *))
2761 {
2762     abi_long ret;
2763 
2764     while (len >= sizeof(struct rtattr)) {
2765         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2766             tswap16(rtattr->rta_len) > len) {
2767             break;
2768         }
2769         rtattr->rta_len = tswap16(rtattr->rta_len);
2770         rtattr->rta_type = tswap16(rtattr->rta_type);
2771         ret = target_to_host_rtattr(rtattr);
2772         if (ret < 0) {
2773             return ret;
2774         }
2775         len -= RTA_ALIGN(rtattr->rta_len);
2776         rtattr = (struct rtattr *)(((char *)rtattr) +
2777                  RTA_ALIGN(rtattr->rta_len));
2778     }
2779     return 0;
2780 }
2781 
2782 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2783 {
2784     switch (rtattr->rta_type) {
2785     default:
2786         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2787         break;
2788     }
2789     return 0;
2790 }
2791 
2792 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2793 {
2794     switch (rtattr->rta_type) {
2795     /* binary: depends on family type */
2796     case IFA_LOCAL:
2797     case IFA_ADDRESS:
2798         break;
2799     default:
2800         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2801         break;
2802     }
2803     return 0;
2804 }
2805 
2806 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2807 {
2808     uint32_t *u32;
2809     switch (rtattr->rta_type) {
2810     /* binary: depends on family type */
2811     case RTA_DST:
2812     case RTA_SRC:
2813     case RTA_GATEWAY:
2814         break;
2815     /* u32 */
2816     case RTA_PRIORITY:
2817     case RTA_OIF:
2818         u32 = RTA_DATA(rtattr);
2819         *u32 = tswap32(*u32);
2820         break;
2821     default:
2822         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2823         break;
2824     }
2825     return 0;
2826 }
2827 
2828 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2829                                        uint32_t rtattr_len)
2830 {
2831     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2832                                    target_to_host_data_link_rtattr);
2833 }
2834 
2835 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2836                                      uint32_t rtattr_len)
2837 {
2838     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2839                                    target_to_host_data_addr_rtattr);
2840 }
2841 
2842 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2843                                      uint32_t rtattr_len)
2844 {
2845     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2846                                    target_to_host_data_route_rtattr);
2847 }
2848 
2849 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2850 {
2851     struct ifinfomsg *ifi;
2852     struct ifaddrmsg *ifa;
2853     struct rtmsg *rtm;
2854 
2855     switch (nlh->nlmsg_type) {
2856     case RTM_GETLINK:
2857         break;
2858     case RTM_NEWLINK:
2859     case RTM_DELLINK:
2860         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2861             ifi = NLMSG_DATA(nlh);
2862             ifi->ifi_type = tswap16(ifi->ifi_type);
2863             ifi->ifi_index = tswap32(ifi->ifi_index);
2864             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2865             ifi->ifi_change = tswap32(ifi->ifi_change);
2866             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2867                                        NLMSG_LENGTH(sizeof(*ifi)));
2868         }
2869         break;
2870     case RTM_GETADDR:
2871     case RTM_NEWADDR:
2872     case RTM_DELADDR:
2873         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2874             ifa = NLMSG_DATA(nlh);
2875             ifa->ifa_index = tswap32(ifa->ifa_index);
2876             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2877                                        NLMSG_LENGTH(sizeof(*ifa)));
2878         }
2879         break;
2880     case RTM_GETROUTE:
2881         break;
2882     case RTM_NEWROUTE:
2883     case RTM_DELROUTE:
2884         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2885             rtm = NLMSG_DATA(nlh);
2886             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2887             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2888                                         NLMSG_LENGTH(sizeof(*rtm)));
2889         }
2890         break;
2891     default:
2892         return -TARGET_EOPNOTSUPP;
2893     }
2894     return 0;
2895 }
2896 
2897 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2898 {
2899     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2900 }
2901 #endif /* CONFIG_RTNETLINK */
2902 
2903 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2904 {
2905     switch (nlh->nlmsg_type) {
2906     default:
2907         gemu_log("Unknown host audit message type %d\n",
2908                  nlh->nlmsg_type);
2909         return -TARGET_EINVAL;
2910     }
2911     return 0;
2912 }
2913 
2914 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2915                                                   size_t len)
2916 {
2917     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2918 }
2919 
2920 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2921 {
2922     switch (nlh->nlmsg_type) {
2923     case AUDIT_USER:
2924     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2925     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2926         break;
2927     default:
2928         gemu_log("Unknown target audit message type %d\n",
2929                  nlh->nlmsg_type);
2930         return -TARGET_EINVAL;
2931     }
2932 
2933     return 0;
2934 }
2935 
2936 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2937 {
2938     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2939 }
2940 
2941 /* do_setsockopt() Must return target values and target errnos. */
2942 static abi_long do_setsockopt(int sockfd, int level, int optname,
2943                               abi_ulong optval_addr, socklen_t optlen)
2944 {
2945     abi_long ret;
2946     int val;
2947     struct ip_mreqn *ip_mreq;
2948     struct ip_mreq_source *ip_mreq_source;
2949 
2950     switch(level) {
2951     case SOL_TCP:
2952         /* TCP options all take an 'int' value.  */
2953         if (optlen < sizeof(uint32_t))
2954             return -TARGET_EINVAL;
2955 
2956         if (get_user_u32(val, optval_addr))
2957             return -TARGET_EFAULT;
2958         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2959         break;
2960     case SOL_IP:
2961         switch(optname) {
2962         case IP_TOS:
2963         case IP_TTL:
2964         case IP_HDRINCL:
2965         case IP_ROUTER_ALERT:
2966         case IP_RECVOPTS:
2967         case IP_RETOPTS:
2968         case IP_PKTINFO:
2969         case IP_MTU_DISCOVER:
2970         case IP_RECVERR:
2971         case IP_RECVTTL:
2972         case IP_RECVTOS:
2973 #ifdef IP_FREEBIND
2974         case IP_FREEBIND:
2975 #endif
2976         case IP_MULTICAST_TTL:
2977         case IP_MULTICAST_LOOP:
2978             val = 0;
2979             if (optlen >= sizeof(uint32_t)) {
2980                 if (get_user_u32(val, optval_addr))
2981                     return -TARGET_EFAULT;
2982             } else if (optlen >= 1) {
2983                 if (get_user_u8(val, optval_addr))
2984                     return -TARGET_EFAULT;
2985             }
2986             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2987             break;
2988         case IP_ADD_MEMBERSHIP:
2989         case IP_DROP_MEMBERSHIP:
2990             if (optlen < sizeof (struct target_ip_mreq) ||
2991                 optlen > sizeof (struct target_ip_mreqn))
2992                 return -TARGET_EINVAL;
2993 
2994             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2995             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2996             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2997             break;
2998 
2999         case IP_BLOCK_SOURCE:
3000         case IP_UNBLOCK_SOURCE:
3001         case IP_ADD_SOURCE_MEMBERSHIP:
3002         case IP_DROP_SOURCE_MEMBERSHIP:
3003             if (optlen != sizeof (struct target_ip_mreq_source))
3004                 return -TARGET_EINVAL;
3005 
3006             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3007             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3008             unlock_user (ip_mreq_source, optval_addr, 0);
3009             break;
3010 
3011         default:
3012             goto unimplemented;
3013         }
3014         break;
3015     case SOL_IPV6:
3016         switch (optname) {
3017         case IPV6_MTU_DISCOVER:
3018         case IPV6_MTU:
3019         case IPV6_V6ONLY:
3020         case IPV6_RECVPKTINFO:
3021         case IPV6_UNICAST_HOPS:
3022         case IPV6_MULTICAST_HOPS:
3023         case IPV6_MULTICAST_LOOP:
3024         case IPV6_RECVERR:
3025         case IPV6_RECVHOPLIMIT:
3026         case IPV6_2292HOPLIMIT:
3027         case IPV6_CHECKSUM:
3028             val = 0;
3029             if (optlen < sizeof(uint32_t)) {
3030                 return -TARGET_EINVAL;
3031             }
3032             if (get_user_u32(val, optval_addr)) {
3033                 return -TARGET_EFAULT;
3034             }
3035             ret = get_errno(setsockopt(sockfd, level, optname,
3036                                        &val, sizeof(val)));
3037             break;
3038         case IPV6_PKTINFO:
3039         {
3040             struct in6_pktinfo pki;
3041 
3042             if (optlen < sizeof(pki)) {
3043                 return -TARGET_EINVAL;
3044             }
3045 
3046             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3047                 return -TARGET_EFAULT;
3048             }
3049 
3050             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3051 
3052             ret = get_errno(setsockopt(sockfd, level, optname,
3053                                        &pki, sizeof(pki)));
3054             break;
3055         }
3056         default:
3057             goto unimplemented;
3058         }
3059         break;
3060     case SOL_ICMPV6:
3061         switch (optname) {
3062         case ICMPV6_FILTER:
3063         {
3064             struct icmp6_filter icmp6f;
3065 
3066             if (optlen > sizeof(icmp6f)) {
3067                 optlen = sizeof(icmp6f);
3068             }
3069 
3070             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3071                 return -TARGET_EFAULT;
3072             }
3073 
3074             for (val = 0; val < 8; val++) {
3075                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3076             }
3077 
3078             ret = get_errno(setsockopt(sockfd, level, optname,
3079                                        &icmp6f, optlen));
3080             break;
3081         }
3082         default:
3083             goto unimplemented;
3084         }
3085         break;
3086     case SOL_RAW:
3087         switch (optname) {
3088         case ICMP_FILTER:
3089         case IPV6_CHECKSUM:
3090             /* those take an u32 value */
3091             if (optlen < sizeof(uint32_t)) {
3092                 return -TARGET_EINVAL;
3093             }
3094 
3095             if (get_user_u32(val, optval_addr)) {
3096                 return -TARGET_EFAULT;
3097             }
3098             ret = get_errno(setsockopt(sockfd, level, optname,
3099                                        &val, sizeof(val)));
3100             break;
3101 
3102         default:
3103             goto unimplemented;
3104         }
3105         break;
3106     case TARGET_SOL_SOCKET:
3107         switch (optname) {
3108         case TARGET_SO_RCVTIMEO:
3109         {
3110                 struct timeval tv;
3111 
3112                 optname = SO_RCVTIMEO;
3113 
3114 set_timeout:
3115                 if (optlen != sizeof(struct target_timeval)) {
3116                     return -TARGET_EINVAL;
3117                 }
3118 
3119                 if (copy_from_user_timeval(&tv, optval_addr)) {
3120                     return -TARGET_EFAULT;
3121                 }
3122 
3123                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3124                                 &tv, sizeof(tv)));
3125                 return ret;
3126         }
3127         case TARGET_SO_SNDTIMEO:
3128                 optname = SO_SNDTIMEO;
3129                 goto set_timeout;
3130         case TARGET_SO_ATTACH_FILTER:
3131         {
3132                 struct target_sock_fprog *tfprog;
3133                 struct target_sock_filter *tfilter;
3134                 struct sock_fprog fprog;
3135                 struct sock_filter *filter;
3136                 int i;
3137 
3138                 if (optlen != sizeof(*tfprog)) {
3139                     return -TARGET_EINVAL;
3140                 }
3141                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3142                     return -TARGET_EFAULT;
3143                 }
3144                 if (!lock_user_struct(VERIFY_READ, tfilter,
3145                                       tswapal(tfprog->filter), 0)) {
3146                     unlock_user_struct(tfprog, optval_addr, 1);
3147                     return -TARGET_EFAULT;
3148                 }
3149 
3150                 fprog.len = tswap16(tfprog->len);
3151                 filter = g_try_new(struct sock_filter, fprog.len);
3152                 if (filter == NULL) {
3153                     unlock_user_struct(tfilter, tfprog->filter, 1);
3154                     unlock_user_struct(tfprog, optval_addr, 1);
3155                     return -TARGET_ENOMEM;
3156                 }
3157                 for (i = 0; i < fprog.len; i++) {
3158                     filter[i].code = tswap16(tfilter[i].code);
3159                     filter[i].jt = tfilter[i].jt;
3160                     filter[i].jf = tfilter[i].jf;
3161                     filter[i].k = tswap32(tfilter[i].k);
3162                 }
3163                 fprog.filter = filter;
3164 
3165                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3166                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3167                 g_free(filter);
3168 
3169                 unlock_user_struct(tfilter, tfprog->filter, 1);
3170                 unlock_user_struct(tfprog, optval_addr, 1);
3171                 return ret;
3172         }
3173 	case TARGET_SO_BINDTODEVICE:
3174 	{
3175 		char *dev_ifname, *addr_ifname;
3176 
3177 		if (optlen > IFNAMSIZ - 1) {
3178 		    optlen = IFNAMSIZ - 1;
3179 		}
3180 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3181 		if (!dev_ifname) {
3182 		    return -TARGET_EFAULT;
3183 		}
3184 		optname = SO_BINDTODEVICE;
3185 		addr_ifname = alloca(IFNAMSIZ);
3186 		memcpy(addr_ifname, dev_ifname, optlen);
3187 		addr_ifname[optlen] = 0;
3188 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3189                                            addr_ifname, optlen));
3190 		unlock_user (dev_ifname, optval_addr, 0);
3191 		return ret;
3192 	}
3193             /* Options with 'int' argument.  */
3194         case TARGET_SO_DEBUG:
3195 		optname = SO_DEBUG;
3196 		break;
3197         case TARGET_SO_REUSEADDR:
3198 		optname = SO_REUSEADDR;
3199 		break;
3200         case TARGET_SO_TYPE:
3201 		optname = SO_TYPE;
3202 		break;
3203         case TARGET_SO_ERROR:
3204 		optname = SO_ERROR;
3205 		break;
3206         case TARGET_SO_DONTROUTE:
3207 		optname = SO_DONTROUTE;
3208 		break;
3209         case TARGET_SO_BROADCAST:
3210 		optname = SO_BROADCAST;
3211 		break;
3212         case TARGET_SO_SNDBUF:
3213 		optname = SO_SNDBUF;
3214 		break;
3215         case TARGET_SO_SNDBUFFORCE:
3216                 optname = SO_SNDBUFFORCE;
3217                 break;
3218         case TARGET_SO_RCVBUF:
3219 		optname = SO_RCVBUF;
3220 		break;
3221         case TARGET_SO_RCVBUFFORCE:
3222                 optname = SO_RCVBUFFORCE;
3223                 break;
3224         case TARGET_SO_KEEPALIVE:
3225 		optname = SO_KEEPALIVE;
3226 		break;
3227         case TARGET_SO_OOBINLINE:
3228 		optname = SO_OOBINLINE;
3229 		break;
3230         case TARGET_SO_NO_CHECK:
3231 		optname = SO_NO_CHECK;
3232 		break;
3233         case TARGET_SO_PRIORITY:
3234 		optname = SO_PRIORITY;
3235 		break;
3236 #ifdef SO_BSDCOMPAT
3237         case TARGET_SO_BSDCOMPAT:
3238 		optname = SO_BSDCOMPAT;
3239 		break;
3240 #endif
3241         case TARGET_SO_PASSCRED:
3242 		optname = SO_PASSCRED;
3243 		break;
3244         case TARGET_SO_PASSSEC:
3245                 optname = SO_PASSSEC;
3246                 break;
3247         case TARGET_SO_TIMESTAMP:
3248 		optname = SO_TIMESTAMP;
3249 		break;
3250         case TARGET_SO_RCVLOWAT:
3251 		optname = SO_RCVLOWAT;
3252 		break;
3253         default:
3254             goto unimplemented;
3255         }
3256 	if (optlen < sizeof(uint32_t))
3257             return -TARGET_EINVAL;
3258 
3259 	if (get_user_u32(val, optval_addr))
3260             return -TARGET_EFAULT;
3261 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3262         break;
3263     default:
3264     unimplemented:
3265         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3266         ret = -TARGET_ENOPROTOOPT;
3267     }
3268     return ret;
3269 }
3270 
3271 /* do_getsockopt() Must return target values and target errnos. */
3272 static abi_long do_getsockopt(int sockfd, int level, int optname,
3273                               abi_ulong optval_addr, abi_ulong optlen)
3274 {
3275     abi_long ret;
3276     int len, val;
3277     socklen_t lv;
3278 
3279     switch(level) {
3280     case TARGET_SOL_SOCKET:
3281         level = SOL_SOCKET;
3282         switch (optname) {
3283         /* These don't just return a single integer */
3284         case TARGET_SO_LINGER:
3285         case TARGET_SO_RCVTIMEO:
3286         case TARGET_SO_SNDTIMEO:
3287         case TARGET_SO_PEERNAME:
3288             goto unimplemented;
3289         case TARGET_SO_PEERCRED: {
3290             struct ucred cr;
3291             socklen_t crlen;
3292             struct target_ucred *tcr;
3293 
3294             if (get_user_u32(len, optlen)) {
3295                 return -TARGET_EFAULT;
3296             }
3297             if (len < 0) {
3298                 return -TARGET_EINVAL;
3299             }
3300 
3301             crlen = sizeof(cr);
3302             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3303                                        &cr, &crlen));
3304             if (ret < 0) {
3305                 return ret;
3306             }
3307             if (len > crlen) {
3308                 len = crlen;
3309             }
3310             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3311                 return -TARGET_EFAULT;
3312             }
3313             __put_user(cr.pid, &tcr->pid);
3314             __put_user(cr.uid, &tcr->uid);
3315             __put_user(cr.gid, &tcr->gid);
3316             unlock_user_struct(tcr, optval_addr, 1);
3317             if (put_user_u32(len, optlen)) {
3318                 return -TARGET_EFAULT;
3319             }
3320             break;
3321         }
3322         /* Options with 'int' argument.  */
3323         case TARGET_SO_DEBUG:
3324             optname = SO_DEBUG;
3325             goto int_case;
3326         case TARGET_SO_REUSEADDR:
3327             optname = SO_REUSEADDR;
3328             goto int_case;
3329         case TARGET_SO_TYPE:
3330             optname = SO_TYPE;
3331             goto int_case;
3332         case TARGET_SO_ERROR:
3333             optname = SO_ERROR;
3334             goto int_case;
3335         case TARGET_SO_DONTROUTE:
3336             optname = SO_DONTROUTE;
3337             goto int_case;
3338         case TARGET_SO_BROADCAST:
3339             optname = SO_BROADCAST;
3340             goto int_case;
3341         case TARGET_SO_SNDBUF:
3342             optname = SO_SNDBUF;
3343             goto int_case;
3344         case TARGET_SO_RCVBUF:
3345             optname = SO_RCVBUF;
3346             goto int_case;
3347         case TARGET_SO_KEEPALIVE:
3348             optname = SO_KEEPALIVE;
3349             goto int_case;
3350         case TARGET_SO_OOBINLINE:
3351             optname = SO_OOBINLINE;
3352             goto int_case;
3353         case TARGET_SO_NO_CHECK:
3354             optname = SO_NO_CHECK;
3355             goto int_case;
3356         case TARGET_SO_PRIORITY:
3357             optname = SO_PRIORITY;
3358             goto int_case;
3359 #ifdef SO_BSDCOMPAT
3360         case TARGET_SO_BSDCOMPAT:
3361             optname = SO_BSDCOMPAT;
3362             goto int_case;
3363 #endif
3364         case TARGET_SO_PASSCRED:
3365             optname = SO_PASSCRED;
3366             goto int_case;
3367         case TARGET_SO_TIMESTAMP:
3368             optname = SO_TIMESTAMP;
3369             goto int_case;
3370         case TARGET_SO_RCVLOWAT:
3371             optname = SO_RCVLOWAT;
3372             goto int_case;
3373         case TARGET_SO_ACCEPTCONN:
3374             optname = SO_ACCEPTCONN;
3375             goto int_case;
3376         default:
3377             goto int_case;
3378         }
3379         break;
3380     case SOL_TCP:
3381         /* TCP options all take an 'int' value.  */
3382     int_case:
3383         if (get_user_u32(len, optlen))
3384             return -TARGET_EFAULT;
3385         if (len < 0)
3386             return -TARGET_EINVAL;
3387         lv = sizeof(lv);
3388         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3389         if (ret < 0)
3390             return ret;
3391         if (optname == SO_TYPE) {
3392             val = host_to_target_sock_type(val);
3393         }
3394         if (len > lv)
3395             len = lv;
3396         if (len == 4) {
3397             if (put_user_u32(val, optval_addr))
3398                 return -TARGET_EFAULT;
3399         } else {
3400             if (put_user_u8(val, optval_addr))
3401                 return -TARGET_EFAULT;
3402         }
3403         if (put_user_u32(len, optlen))
3404             return -TARGET_EFAULT;
3405         break;
3406     case SOL_IP:
3407         switch(optname) {
3408         case IP_TOS:
3409         case IP_TTL:
3410         case IP_HDRINCL:
3411         case IP_ROUTER_ALERT:
3412         case IP_RECVOPTS:
3413         case IP_RETOPTS:
3414         case IP_PKTINFO:
3415         case IP_MTU_DISCOVER:
3416         case IP_RECVERR:
3417         case IP_RECVTOS:
3418 #ifdef IP_FREEBIND
3419         case IP_FREEBIND:
3420 #endif
3421         case IP_MULTICAST_TTL:
3422         case IP_MULTICAST_LOOP:
3423             if (get_user_u32(len, optlen))
3424                 return -TARGET_EFAULT;
3425             if (len < 0)
3426                 return -TARGET_EINVAL;
3427             lv = sizeof(lv);
3428             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3429             if (ret < 0)
3430                 return ret;
3431             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3432                 len = 1;
3433                 if (put_user_u32(len, optlen)
3434                     || put_user_u8(val, optval_addr))
3435                     return -TARGET_EFAULT;
3436             } else {
3437                 if (len > sizeof(int))
3438                     len = sizeof(int);
3439                 if (put_user_u32(len, optlen)
3440                     || put_user_u32(val, optval_addr))
3441                     return -TARGET_EFAULT;
3442             }
3443             break;
3444         default:
3445             ret = -TARGET_ENOPROTOOPT;
3446             break;
3447         }
3448         break;
3449     default:
3450     unimplemented:
3451         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3452                  level, optname);
3453         ret = -TARGET_EOPNOTSUPP;
3454         break;
3455     }
3456     return ret;
3457 }
3458 
3459 /* Convert target low/high pair representing file offset into the host
3460  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3461  * as the kernel doesn't handle them either.
3462  */
3463 static void target_to_host_low_high(abi_ulong tlow,
3464                                     abi_ulong thigh,
3465                                     unsigned long *hlow,
3466                                     unsigned long *hhigh)
3467 {
3468     uint64_t off = tlow |
3469         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3470         TARGET_LONG_BITS / 2;
3471 
3472     *hlow = off;
3473     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3474 }
3475 
3476 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3477                                 abi_ulong count, int copy)
3478 {
3479     struct target_iovec *target_vec;
3480     struct iovec *vec;
3481     abi_ulong total_len, max_len;
3482     int i;
3483     int err = 0;
3484     bool bad_address = false;
3485 
3486     if (count == 0) {
3487         errno = 0;
3488         return NULL;
3489     }
3490     if (count > IOV_MAX) {
3491         errno = EINVAL;
3492         return NULL;
3493     }
3494 
3495     vec = g_try_new0(struct iovec, count);
3496     if (vec == NULL) {
3497         errno = ENOMEM;
3498         return NULL;
3499     }
3500 
3501     target_vec = lock_user(VERIFY_READ, target_addr,
3502                            count * sizeof(struct target_iovec), 1);
3503     if (target_vec == NULL) {
3504         err = EFAULT;
3505         goto fail2;
3506     }
3507 
3508     /* ??? If host page size > target page size, this will result in a
3509        value larger than what we can actually support.  */
3510     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3511     total_len = 0;
3512 
3513     for (i = 0; i < count; i++) {
3514         abi_ulong base = tswapal(target_vec[i].iov_base);
3515         abi_long len = tswapal(target_vec[i].iov_len);
3516 
3517         if (len < 0) {
3518             err = EINVAL;
3519             goto fail;
3520         } else if (len == 0) {
3521             /* Zero length pointer is ignored.  */
3522             vec[i].iov_base = 0;
3523         } else {
3524             vec[i].iov_base = lock_user(type, base, len, copy);
3525             /* If the first buffer pointer is bad, this is a fault.  But
3526              * subsequent bad buffers will result in a partial write; this
3527              * is realized by filling the vector with null pointers and
3528              * zero lengths. */
3529             if (!vec[i].iov_base) {
3530                 if (i == 0) {
3531                     err = EFAULT;
3532                     goto fail;
3533                 } else {
3534                     bad_address = true;
3535                 }
3536             }
3537             if (bad_address) {
3538                 len = 0;
3539             }
3540             if (len > max_len - total_len) {
3541                 len = max_len - total_len;
3542             }
3543         }
3544         vec[i].iov_len = len;
3545         total_len += len;
3546     }
3547 
3548     unlock_user(target_vec, target_addr, 0);
3549     return vec;
3550 
3551  fail:
3552     while (--i >= 0) {
3553         if (tswapal(target_vec[i].iov_len) > 0) {
3554             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3555         }
3556     }
3557     unlock_user(target_vec, target_addr, 0);
3558  fail2:
3559     g_free(vec);
3560     errno = err;
3561     return NULL;
3562 }
3563 
3564 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3565                          abi_ulong count, int copy)
3566 {
3567     struct target_iovec *target_vec;
3568     int i;
3569 
3570     target_vec = lock_user(VERIFY_READ, target_addr,
3571                            count * sizeof(struct target_iovec), 1);
3572     if (target_vec) {
3573         for (i = 0; i < count; i++) {
3574             abi_ulong base = tswapal(target_vec[i].iov_base);
3575             abi_long len = tswapal(target_vec[i].iov_len);
3576             if (len < 0) {
3577                 break;
3578             }
3579             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3580         }
3581         unlock_user(target_vec, target_addr, 0);
3582     }
3583 
3584     g_free(vec);
3585 }
3586 
3587 static inline int target_to_host_sock_type(int *type)
3588 {
3589     int host_type = 0;
3590     int target_type = *type;
3591 
3592     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3593     case TARGET_SOCK_DGRAM:
3594         host_type = SOCK_DGRAM;
3595         break;
3596     case TARGET_SOCK_STREAM:
3597         host_type = SOCK_STREAM;
3598         break;
3599     default:
3600         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3601         break;
3602     }
3603     if (target_type & TARGET_SOCK_CLOEXEC) {
3604 #if defined(SOCK_CLOEXEC)
3605         host_type |= SOCK_CLOEXEC;
3606 #else
3607         return -TARGET_EINVAL;
3608 #endif
3609     }
3610     if (target_type & TARGET_SOCK_NONBLOCK) {
3611 #if defined(SOCK_NONBLOCK)
3612         host_type |= SOCK_NONBLOCK;
3613 #elif !defined(O_NONBLOCK)
3614         return -TARGET_EINVAL;
3615 #endif
3616     }
3617     *type = host_type;
3618     return 0;
3619 }
3620 
3621 /* Try to emulate socket type flags after socket creation.  */
3622 static int sock_flags_fixup(int fd, int target_type)
3623 {
3624 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3625     if (target_type & TARGET_SOCK_NONBLOCK) {
3626         int flags = fcntl(fd, F_GETFL);
3627         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3628             close(fd);
3629             return -TARGET_EINVAL;
3630         }
3631     }
3632 #endif
3633     return fd;
3634 }
3635 
3636 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3637                                                abi_ulong target_addr,
3638                                                socklen_t len)
3639 {
3640     struct sockaddr *addr = host_addr;
3641     struct target_sockaddr *target_saddr;
3642 
3643     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3644     if (!target_saddr) {
3645         return -TARGET_EFAULT;
3646     }
3647 
3648     memcpy(addr, target_saddr, len);
3649     addr->sa_family = tswap16(target_saddr->sa_family);
3650     /* spkt_protocol is big-endian */
3651 
3652     unlock_user(target_saddr, target_addr, 0);
3653     return 0;
3654 }
3655 
3656 static TargetFdTrans target_packet_trans = {
3657     .target_to_host_addr = packet_target_to_host_sockaddr,
3658 };
3659 
3660 #ifdef CONFIG_RTNETLINK
3661 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3662 {
3663     abi_long ret;
3664 
3665     ret = target_to_host_nlmsg_route(buf, len);
3666     if (ret < 0) {
3667         return ret;
3668     }
3669 
3670     return len;
3671 }
3672 
3673 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3674 {
3675     abi_long ret;
3676 
3677     ret = host_to_target_nlmsg_route(buf, len);
3678     if (ret < 0) {
3679         return ret;
3680     }
3681 
3682     return len;
3683 }
3684 
3685 static TargetFdTrans target_netlink_route_trans = {
3686     .target_to_host_data = netlink_route_target_to_host,
3687     .host_to_target_data = netlink_route_host_to_target,
3688 };
3689 #endif /* CONFIG_RTNETLINK */
3690 
3691 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3692 {
3693     abi_long ret;
3694 
3695     ret = target_to_host_nlmsg_audit(buf, len);
3696     if (ret < 0) {
3697         return ret;
3698     }
3699 
3700     return len;
3701 }
3702 
3703 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3704 {
3705     abi_long ret;
3706 
3707     ret = host_to_target_nlmsg_audit(buf, len);
3708     if (ret < 0) {
3709         return ret;
3710     }
3711 
3712     return len;
3713 }
3714 
3715 static TargetFdTrans target_netlink_audit_trans = {
3716     .target_to_host_data = netlink_audit_target_to_host,
3717     .host_to_target_data = netlink_audit_host_to_target,
3718 };
3719 
3720 /* do_socket() Must return target values and target errnos. */
3721 static abi_long do_socket(int domain, int type, int protocol)
3722 {
3723     int target_type = type;
3724     int ret;
3725 
3726     ret = target_to_host_sock_type(&type);
3727     if (ret) {
3728         return ret;
3729     }
3730 
3731     if (domain == PF_NETLINK && !(
3732 #ifdef CONFIG_RTNETLINK
3733          protocol == NETLINK_ROUTE ||
3734 #endif
3735          protocol == NETLINK_KOBJECT_UEVENT ||
3736          protocol == NETLINK_AUDIT)) {
3737         return -EPFNOSUPPORT;
3738     }
3739 
3740     if (domain == AF_PACKET ||
3741         (domain == AF_INET && type == SOCK_PACKET)) {
3742         protocol = tswap16(protocol);
3743     }
3744 
3745     ret = get_errno(socket(domain, type, protocol));
3746     if (ret >= 0) {
3747         ret = sock_flags_fixup(ret, target_type);
3748         if (type == SOCK_PACKET) {
3749             /* Manage an obsolete case :
3750              * if socket type is SOCK_PACKET, bind by name
3751              */
3752             fd_trans_register(ret, &target_packet_trans);
3753         } else if (domain == PF_NETLINK) {
3754             switch (protocol) {
3755 #ifdef CONFIG_RTNETLINK
3756             case NETLINK_ROUTE:
3757                 fd_trans_register(ret, &target_netlink_route_trans);
3758                 break;
3759 #endif
3760             case NETLINK_KOBJECT_UEVENT:
3761                 /* nothing to do: messages are strings */
3762                 break;
3763             case NETLINK_AUDIT:
3764                 fd_trans_register(ret, &target_netlink_audit_trans);
3765                 break;
3766             default:
3767                 g_assert_not_reached();
3768             }
3769         }
3770     }
3771     return ret;
3772 }
3773 
3774 /* do_bind() Must return target values and target errnos. */
3775 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3776                         socklen_t addrlen)
3777 {
3778     void *addr;
3779     abi_long ret;
3780 
3781     if ((int)addrlen < 0) {
3782         return -TARGET_EINVAL;
3783     }
3784 
3785     addr = alloca(addrlen+1);
3786 
3787     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3788     if (ret)
3789         return ret;
3790 
3791     return get_errno(bind(sockfd, addr, addrlen));
3792 }
3793 
3794 /* do_connect() Must return target values and target errnos. */
3795 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3796                            socklen_t addrlen)
3797 {
3798     void *addr;
3799     abi_long ret;
3800 
3801     if ((int)addrlen < 0) {
3802         return -TARGET_EINVAL;
3803     }
3804 
3805     addr = alloca(addrlen+1);
3806 
3807     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3808     if (ret)
3809         return ret;
3810 
3811     return get_errno(safe_connect(sockfd, addr, addrlen));
3812 }
3813 
3814 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3815 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3816                                       int flags, int send)
3817 {
3818     abi_long ret, len;
3819     struct msghdr msg;
3820     abi_ulong count;
3821     struct iovec *vec;
3822     abi_ulong target_vec;
3823 
3824     if (msgp->msg_name) {
3825         msg.msg_namelen = tswap32(msgp->msg_namelen);
3826         msg.msg_name = alloca(msg.msg_namelen+1);
3827         ret = target_to_host_sockaddr(fd, msg.msg_name,
3828                                       tswapal(msgp->msg_name),
3829                                       msg.msg_namelen);
3830         if (ret == -TARGET_EFAULT) {
3831             /* For connected sockets msg_name and msg_namelen must
3832              * be ignored, so returning EFAULT immediately is wrong.
3833              * Instead, pass a bad msg_name to the host kernel, and
3834              * let it decide whether to return EFAULT or not.
3835              */
3836             msg.msg_name = (void *)-1;
3837         } else if (ret) {
3838             goto out2;
3839         }
3840     } else {
3841         msg.msg_name = NULL;
3842         msg.msg_namelen = 0;
3843     }
3844     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3845     msg.msg_control = alloca(msg.msg_controllen);
3846     msg.msg_flags = tswap32(msgp->msg_flags);
3847 
3848     count = tswapal(msgp->msg_iovlen);
3849     target_vec = tswapal(msgp->msg_iov);
3850 
3851     if (count > IOV_MAX) {
3852         /* sendrcvmsg returns a different errno for this condition than
3853          * readv/writev, so we must catch it here before lock_iovec() does.
3854          */
3855         ret = -TARGET_EMSGSIZE;
3856         goto out2;
3857     }
3858 
3859     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3860                      target_vec, count, send);
3861     if (vec == NULL) {
3862         ret = -host_to_target_errno(errno);
3863         goto out2;
3864     }
3865     msg.msg_iovlen = count;
3866     msg.msg_iov = vec;
3867 
3868     if (send) {
3869         if (fd_trans_target_to_host_data(fd)) {
3870             void *host_msg;
3871 
3872             host_msg = g_malloc(msg.msg_iov->iov_len);
3873             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3874             ret = fd_trans_target_to_host_data(fd)(host_msg,
3875                                                    msg.msg_iov->iov_len);
3876             if (ret >= 0) {
3877                 msg.msg_iov->iov_base = host_msg;
3878                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3879             }
3880             g_free(host_msg);
3881         } else {
3882             ret = target_to_host_cmsg(&msg, msgp);
3883             if (ret == 0) {
3884                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3885             }
3886         }
3887     } else {
3888         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3889         if (!is_error(ret)) {
3890             len = ret;
3891             if (fd_trans_host_to_target_data(fd)) {
3892                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3893                                                        len);
3894             } else {
3895                 ret = host_to_target_cmsg(msgp, &msg);
3896             }
3897             if (!is_error(ret)) {
3898                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3899                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3900                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3901                                     msg.msg_name, msg.msg_namelen);
3902                     if (ret) {
3903                         goto out;
3904                     }
3905                 }
3906 
3907                 ret = len;
3908             }
3909         }
3910     }
3911 
3912 out:
3913     unlock_iovec(vec, target_vec, count, !send);
3914 out2:
3915     return ret;
3916 }
3917 
3918 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3919                                int flags, int send)
3920 {
3921     abi_long ret;
3922     struct target_msghdr *msgp;
3923 
3924     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3925                           msgp,
3926                           target_msg,
3927                           send ? 1 : 0)) {
3928         return -TARGET_EFAULT;
3929     }
3930     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3931     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3932     return ret;
3933 }
3934 
3935 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3936  * so it might not have this *mmsg-specific flag either.
3937  */
3938 #ifndef MSG_WAITFORONE
3939 #define MSG_WAITFORONE 0x10000
3940 #endif
3941 
3942 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3943                                 unsigned int vlen, unsigned int flags,
3944                                 int send)
3945 {
3946     struct target_mmsghdr *mmsgp;
3947     abi_long ret = 0;
3948     int i;
3949 
3950     if (vlen > UIO_MAXIOV) {
3951         vlen = UIO_MAXIOV;
3952     }
3953 
3954     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3955     if (!mmsgp) {
3956         return -TARGET_EFAULT;
3957     }
3958 
3959     for (i = 0; i < vlen; i++) {
3960         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3961         if (is_error(ret)) {
3962             break;
3963         }
3964         mmsgp[i].msg_len = tswap32(ret);
3965         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3966         if (flags & MSG_WAITFORONE) {
3967             flags |= MSG_DONTWAIT;
3968         }
3969     }
3970 
3971     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3972 
3973     /* Return number of datagrams sent if we sent any at all;
3974      * otherwise return the error.
3975      */
3976     if (i) {
3977         return i;
3978     }
3979     return ret;
3980 }
3981 
3982 /* do_accept4() Must return target values and target errnos. */
3983 static abi_long do_accept4(int fd, abi_ulong target_addr,
3984                            abi_ulong target_addrlen_addr, int flags)
3985 {
3986     socklen_t addrlen;
3987     void *addr;
3988     abi_long ret;
3989     int host_flags;
3990 
3991     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3992 
3993     if (target_addr == 0) {
3994         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3995     }
3996 
3997     /* linux returns EINVAL if addrlen pointer is invalid */
3998     if (get_user_u32(addrlen, target_addrlen_addr))
3999         return -TARGET_EINVAL;
4000 
4001     if ((int)addrlen < 0) {
4002         return -TARGET_EINVAL;
4003     }
4004 
4005     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4006         return -TARGET_EINVAL;
4007 
4008     addr = alloca(addrlen);
4009 
4010     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4011     if (!is_error(ret)) {
4012         host_to_target_sockaddr(target_addr, addr, addrlen);
4013         if (put_user_u32(addrlen, target_addrlen_addr))
4014             ret = -TARGET_EFAULT;
4015     }
4016     return ret;
4017 }
4018 
4019 /* do_getpeername() Must return target values and target errnos. */
4020 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4021                                abi_ulong target_addrlen_addr)
4022 {
4023     socklen_t addrlen;
4024     void *addr;
4025     abi_long ret;
4026 
4027     if (get_user_u32(addrlen, target_addrlen_addr))
4028         return -TARGET_EFAULT;
4029 
4030     if ((int)addrlen < 0) {
4031         return -TARGET_EINVAL;
4032     }
4033 
4034     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4035         return -TARGET_EFAULT;
4036 
4037     addr = alloca(addrlen);
4038 
4039     ret = get_errno(getpeername(fd, addr, &addrlen));
4040     if (!is_error(ret)) {
4041         host_to_target_sockaddr(target_addr, addr, addrlen);
4042         if (put_user_u32(addrlen, target_addrlen_addr))
4043             ret = -TARGET_EFAULT;
4044     }
4045     return ret;
4046 }
4047 
4048 /* do_getsockname() Must return target values and target errnos. */
4049 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4050                                abi_ulong target_addrlen_addr)
4051 {
4052     socklen_t addrlen;
4053     void *addr;
4054     abi_long ret;
4055 
4056     if (get_user_u32(addrlen, target_addrlen_addr))
4057         return -TARGET_EFAULT;
4058 
4059     if ((int)addrlen < 0) {
4060         return -TARGET_EINVAL;
4061     }
4062 
4063     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4064         return -TARGET_EFAULT;
4065 
4066     addr = alloca(addrlen);
4067 
4068     ret = get_errno(getsockname(fd, addr, &addrlen));
4069     if (!is_error(ret)) {
4070         host_to_target_sockaddr(target_addr, addr, addrlen);
4071         if (put_user_u32(addrlen, target_addrlen_addr))
4072             ret = -TARGET_EFAULT;
4073     }
4074     return ret;
4075 }
4076 
4077 /* do_socketpair() Must return target values and target errnos. */
4078 static abi_long do_socketpair(int domain, int type, int protocol,
4079                               abi_ulong target_tab_addr)
4080 {
4081     int tab[2];
4082     abi_long ret;
4083 
4084     target_to_host_sock_type(&type);
4085 
4086     ret = get_errno(socketpair(domain, type, protocol, tab));
4087     if (!is_error(ret)) {
4088         if (put_user_s32(tab[0], target_tab_addr)
4089             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4090             ret = -TARGET_EFAULT;
4091     }
4092     return ret;
4093 }
4094 
4095 /* do_sendto() Must return target values and target errnos. */
4096 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4097                           abi_ulong target_addr, socklen_t addrlen)
4098 {
4099     void *addr;
4100     void *host_msg;
4101     void *copy_msg = NULL;
4102     abi_long ret;
4103 
4104     if ((int)addrlen < 0) {
4105         return -TARGET_EINVAL;
4106     }
4107 
4108     host_msg = lock_user(VERIFY_READ, msg, len, 1);
4109     if (!host_msg)
4110         return -TARGET_EFAULT;
4111     if (fd_trans_target_to_host_data(fd)) {
4112         copy_msg = host_msg;
4113         host_msg = g_malloc(len);
4114         memcpy(host_msg, copy_msg, len);
4115         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4116         if (ret < 0) {
4117             goto fail;
4118         }
4119     }
4120     if (target_addr) {
4121         addr = alloca(addrlen+1);
4122         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4123         if (ret) {
4124             goto fail;
4125         }
4126         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4127     } else {
4128         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4129     }
4130 fail:
4131     if (copy_msg) {
4132         g_free(host_msg);
4133         host_msg = copy_msg;
4134     }
4135     unlock_user(host_msg, msg, 0);
4136     return ret;
4137 }
4138 
4139 /* do_recvfrom() Must return target values and target errnos. */
4140 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4141                             abi_ulong target_addr,
4142                             abi_ulong target_addrlen)
4143 {
4144     socklen_t addrlen;
4145     void *addr;
4146     void *host_msg;
4147     abi_long ret;
4148 
4149     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4150     if (!host_msg)
4151         return -TARGET_EFAULT;
4152     if (target_addr) {
4153         if (get_user_u32(addrlen, target_addrlen)) {
4154             ret = -TARGET_EFAULT;
4155             goto fail;
4156         }
4157         if ((int)addrlen < 0) {
4158             ret = -TARGET_EINVAL;
4159             goto fail;
4160         }
4161         addr = alloca(addrlen);
4162         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4163                                       addr, &addrlen));
4164     } else {
4165         addr = NULL; /* To keep compiler quiet.  */
4166         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4167     }
4168     if (!is_error(ret)) {
4169         if (fd_trans_host_to_target_data(fd)) {
4170             ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4171         }
4172         if (target_addr) {
4173             host_to_target_sockaddr(target_addr, addr, addrlen);
4174             if (put_user_u32(addrlen, target_addrlen)) {
4175                 ret = -TARGET_EFAULT;
4176                 goto fail;
4177             }
4178         }
4179         unlock_user(host_msg, msg, len);
4180     } else {
4181 fail:
4182         unlock_user(host_msg, msg, 0);
4183     }
4184     return ret;
4185 }
4186 
4187 #ifdef TARGET_NR_socketcall
4188 /* do_socketcall() must return target values and target errnos. */
4189 static abi_long do_socketcall(int num, abi_ulong vptr)
4190 {
4191     static const unsigned nargs[] = { /* number of arguments per operation */
4192         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
4193         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
4194         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
4195         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
4196         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
4197         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4198         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4199         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
4200         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
4201         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
4202         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
4203         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
4204         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
4205         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4206         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4207         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
4208         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
4209         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
4210         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
4211         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
4212     };
4213     abi_long a[6]; /* max 6 args */
4214     unsigned i;
4215 
4216     /* check the range of the first argument num */
4217     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4218     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4219         return -TARGET_EINVAL;
4220     }
4221     /* ensure we have space for args */
4222     if (nargs[num] > ARRAY_SIZE(a)) {
4223         return -TARGET_EINVAL;
4224     }
4225     /* collect the arguments in a[] according to nargs[] */
4226     for (i = 0; i < nargs[num]; ++i) {
4227         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4228             return -TARGET_EFAULT;
4229         }
4230     }
4231     /* now when we have the args, invoke the appropriate underlying function */
4232     switch (num) {
4233     case TARGET_SYS_SOCKET: /* domain, type, protocol */
4234         return do_socket(a[0], a[1], a[2]);
4235     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4236         return do_bind(a[0], a[1], a[2]);
4237     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4238         return do_connect(a[0], a[1], a[2]);
4239     case TARGET_SYS_LISTEN: /* sockfd, backlog */
4240         return get_errno(listen(a[0], a[1]));
4241     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4242         return do_accept4(a[0], a[1], a[2], 0);
4243     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4244         return do_getsockname(a[0], a[1], a[2]);
4245     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4246         return do_getpeername(a[0], a[1], a[2]);
4247     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4248         return do_socketpair(a[0], a[1], a[2], a[3]);
4249     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4250         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4251     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4252         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4253     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4254         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4255     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4256         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4257     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4258         return get_errno(shutdown(a[0], a[1]));
4259     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4260         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4261     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4262         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4263     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4264         return do_sendrecvmsg(a[0], a[1], a[2], 1);
4265     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4266         return do_sendrecvmsg(a[0], a[1], a[2], 0);
4267     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4268         return do_accept4(a[0], a[1], a[2], a[3]);
4269     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4270         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4271     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4272         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4273     default:
4274         gemu_log("Unsupported socketcall: %d\n", num);
4275         return -TARGET_EINVAL;
4276     }
4277 }
4278 #endif
4279 
4280 #define N_SHM_REGIONS	32
4281 
4282 static struct shm_region {
4283     abi_ulong start;
4284     abi_ulong size;
4285     bool in_use;
4286 } shm_regions[N_SHM_REGIONS];
4287 
4288 #ifndef TARGET_SEMID64_DS
4289 /* asm-generic version of this struct */
4290 struct target_semid64_ds
4291 {
4292   struct target_ipc_perm sem_perm;
4293   abi_ulong sem_otime;
4294 #if TARGET_ABI_BITS == 32
4295   abi_ulong __unused1;
4296 #endif
4297   abi_ulong sem_ctime;
4298 #if TARGET_ABI_BITS == 32
4299   abi_ulong __unused2;
4300 #endif
4301   abi_ulong sem_nsems;
4302   abi_ulong __unused3;
4303   abi_ulong __unused4;
4304 };
4305 #endif
4306 
4307 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4308                                                abi_ulong target_addr)
4309 {
4310     struct target_ipc_perm *target_ip;
4311     struct target_semid64_ds *target_sd;
4312 
4313     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4314         return -TARGET_EFAULT;
4315     target_ip = &(target_sd->sem_perm);
4316     host_ip->__key = tswap32(target_ip->__key);
4317     host_ip->uid = tswap32(target_ip->uid);
4318     host_ip->gid = tswap32(target_ip->gid);
4319     host_ip->cuid = tswap32(target_ip->cuid);
4320     host_ip->cgid = tswap32(target_ip->cgid);
4321 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4322     host_ip->mode = tswap32(target_ip->mode);
4323 #else
4324     host_ip->mode = tswap16(target_ip->mode);
4325 #endif
4326 #if defined(TARGET_PPC)
4327     host_ip->__seq = tswap32(target_ip->__seq);
4328 #else
4329     host_ip->__seq = tswap16(target_ip->__seq);
4330 #endif
4331     unlock_user_struct(target_sd, target_addr, 0);
4332     return 0;
4333 }
4334 
4335 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4336                                                struct ipc_perm *host_ip)
4337 {
4338     struct target_ipc_perm *target_ip;
4339     struct target_semid64_ds *target_sd;
4340 
4341     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4342         return -TARGET_EFAULT;
4343     target_ip = &(target_sd->sem_perm);
4344     target_ip->__key = tswap32(host_ip->__key);
4345     target_ip->uid = tswap32(host_ip->uid);
4346     target_ip->gid = tswap32(host_ip->gid);
4347     target_ip->cuid = tswap32(host_ip->cuid);
4348     target_ip->cgid = tswap32(host_ip->cgid);
4349 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4350     target_ip->mode = tswap32(host_ip->mode);
4351 #else
4352     target_ip->mode = tswap16(host_ip->mode);
4353 #endif
4354 #if defined(TARGET_PPC)
4355     target_ip->__seq = tswap32(host_ip->__seq);
4356 #else
4357     target_ip->__seq = tswap16(host_ip->__seq);
4358 #endif
4359     unlock_user_struct(target_sd, target_addr, 1);
4360     return 0;
4361 }
4362 
4363 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4364                                                abi_ulong target_addr)
4365 {
4366     struct target_semid64_ds *target_sd;
4367 
4368     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4369         return -TARGET_EFAULT;
4370     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4371         return -TARGET_EFAULT;
4372     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4373     host_sd->sem_otime = tswapal(target_sd->sem_otime);
4374     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4375     unlock_user_struct(target_sd, target_addr, 0);
4376     return 0;
4377 }
4378 
4379 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4380                                                struct semid_ds *host_sd)
4381 {
4382     struct target_semid64_ds *target_sd;
4383 
4384     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4385         return -TARGET_EFAULT;
4386     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4387         return -TARGET_EFAULT;
4388     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4389     target_sd->sem_otime = tswapal(host_sd->sem_otime);
4390     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4391     unlock_user_struct(target_sd, target_addr, 1);
4392     return 0;
4393 }
4394 
4395 struct target_seminfo {
4396     int semmap;
4397     int semmni;
4398     int semmns;
4399     int semmnu;
4400     int semmsl;
4401     int semopm;
4402     int semume;
4403     int semusz;
4404     int semvmx;
4405     int semaem;
4406 };
4407 
4408 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4409                                               struct seminfo *host_seminfo)
4410 {
4411     struct target_seminfo *target_seminfo;
4412     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4413         return -TARGET_EFAULT;
4414     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4415     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4416     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4417     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4418     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4419     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4420     __put_user(host_seminfo->semume, &target_seminfo->semume);
4421     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4422     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4423     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4424     unlock_user_struct(target_seminfo, target_addr, 1);
4425     return 0;
4426 }
4427 
4428 union semun {
4429 	int val;
4430 	struct semid_ds *buf;
4431 	unsigned short *array;
4432 	struct seminfo *__buf;
4433 };
4434 
4435 union target_semun {
4436 	int val;
4437 	abi_ulong buf;
4438 	abi_ulong array;
4439 	abi_ulong __buf;
4440 };
4441 
4442 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4443                                                abi_ulong target_addr)
4444 {
4445     int nsems;
4446     unsigned short *array;
4447     union semun semun;
4448     struct semid_ds semid_ds;
4449     int i, ret;
4450 
4451     semun.buf = &semid_ds;
4452 
4453     ret = semctl(semid, 0, IPC_STAT, semun);
4454     if (ret == -1)
4455         return get_errno(ret);
4456 
4457     nsems = semid_ds.sem_nsems;
4458 
4459     *host_array = g_try_new(unsigned short, nsems);
4460     if (!*host_array) {
4461         return -TARGET_ENOMEM;
4462     }
4463     array = lock_user(VERIFY_READ, target_addr,
4464                       nsems*sizeof(unsigned short), 1);
4465     if (!array) {
4466         g_free(*host_array);
4467         return -TARGET_EFAULT;
4468     }
4469 
4470     for(i=0; i<nsems; i++) {
4471         __get_user((*host_array)[i], &array[i]);
4472     }
4473     unlock_user(array, target_addr, 0);
4474 
4475     return 0;
4476 }
4477 
4478 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4479                                                unsigned short **host_array)
4480 {
4481     int nsems;
4482     unsigned short *array;
4483     union semun semun;
4484     struct semid_ds semid_ds;
4485     int i, ret;
4486 
4487     semun.buf = &semid_ds;
4488 
4489     ret = semctl(semid, 0, IPC_STAT, semun);
4490     if (ret == -1)
4491         return get_errno(ret);
4492 
4493     nsems = semid_ds.sem_nsems;
4494 
4495     array = lock_user(VERIFY_WRITE, target_addr,
4496                       nsems*sizeof(unsigned short), 0);
4497     if (!array)
4498         return -TARGET_EFAULT;
4499 
4500     for(i=0; i<nsems; i++) {
4501         __put_user((*host_array)[i], &array[i]);
4502     }
4503     g_free(*host_array);
4504     unlock_user(array, target_addr, 1);
4505 
4506     return 0;
4507 }
4508 
4509 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4510                                  abi_ulong target_arg)
4511 {
4512     union target_semun target_su = { .buf = target_arg };
4513     union semun arg;
4514     struct semid_ds dsarg;
4515     unsigned short *array = NULL;
4516     struct seminfo seminfo;
4517     abi_long ret = -TARGET_EINVAL;
4518     abi_long err;
4519     cmd &= 0xff;
4520 
4521     switch( cmd ) {
4522 	case GETVAL:
4523 	case SETVAL:
4524             /* In 64 bit cross-endian situations, we will erroneously pick up
4525              * the wrong half of the union for the "val" element.  To rectify
4526              * this, the entire 8-byte structure is byteswapped, followed by
4527 	     * a swap of the 4 byte val field. In other cases, the data is
4528 	     * already in proper host byte order. */
4529 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4530 		target_su.buf = tswapal(target_su.buf);
4531 		arg.val = tswap32(target_su.val);
4532 	    } else {
4533 		arg.val = target_su.val;
4534 	    }
4535             ret = get_errno(semctl(semid, semnum, cmd, arg));
4536             break;
4537 	case GETALL:
4538 	case SETALL:
4539             err = target_to_host_semarray(semid, &array, target_su.array);
4540             if (err)
4541                 return err;
4542             arg.array = array;
4543             ret = get_errno(semctl(semid, semnum, cmd, arg));
4544             err = host_to_target_semarray(semid, target_su.array, &array);
4545             if (err)
4546                 return err;
4547             break;
4548 	case IPC_STAT:
4549 	case IPC_SET:
4550 	case SEM_STAT:
4551             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4552             if (err)
4553                 return err;
4554             arg.buf = &dsarg;
4555             ret = get_errno(semctl(semid, semnum, cmd, arg));
4556             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4557             if (err)
4558                 return err;
4559             break;
4560 	case IPC_INFO:
4561 	case SEM_INFO:
4562             arg.__buf = &seminfo;
4563             ret = get_errno(semctl(semid, semnum, cmd, arg));
4564             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4565             if (err)
4566                 return err;
4567             break;
4568 	case IPC_RMID:
4569 	case GETPID:
4570 	case GETNCNT:
4571 	case GETZCNT:
4572             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4573             break;
4574     }
4575 
4576     return ret;
4577 }
4578 
4579 struct target_sembuf {
4580     unsigned short sem_num;
4581     short sem_op;
4582     short sem_flg;
4583 };
4584 
4585 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4586                                              abi_ulong target_addr,
4587                                              unsigned nsops)
4588 {
4589     struct target_sembuf *target_sembuf;
4590     int i;
4591 
4592     target_sembuf = lock_user(VERIFY_READ, target_addr,
4593                               nsops*sizeof(struct target_sembuf), 1);
4594     if (!target_sembuf)
4595         return -TARGET_EFAULT;
4596 
4597     for(i=0; i<nsops; i++) {
4598         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4599         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4600         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4601     }
4602 
4603     unlock_user(target_sembuf, target_addr, 0);
4604 
4605     return 0;
4606 }
4607 
4608 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4609 {
4610     struct sembuf sops[nsops];
4611 
4612     if (target_to_host_sembuf(sops, ptr, nsops))
4613         return -TARGET_EFAULT;
4614 
4615     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4616 }
4617 
4618 struct target_msqid_ds
4619 {
4620     struct target_ipc_perm msg_perm;
4621     abi_ulong msg_stime;
4622 #if TARGET_ABI_BITS == 32
4623     abi_ulong __unused1;
4624 #endif
4625     abi_ulong msg_rtime;
4626 #if TARGET_ABI_BITS == 32
4627     abi_ulong __unused2;
4628 #endif
4629     abi_ulong msg_ctime;
4630 #if TARGET_ABI_BITS == 32
4631     abi_ulong __unused3;
4632 #endif
4633     abi_ulong __msg_cbytes;
4634     abi_ulong msg_qnum;
4635     abi_ulong msg_qbytes;
4636     abi_ulong msg_lspid;
4637     abi_ulong msg_lrpid;
4638     abi_ulong __unused4;
4639     abi_ulong __unused5;
4640 };
4641 
4642 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4643                                                abi_ulong target_addr)
4644 {
4645     struct target_msqid_ds *target_md;
4646 
4647     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4648         return -TARGET_EFAULT;
4649     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4650         return -TARGET_EFAULT;
4651     host_md->msg_stime = tswapal(target_md->msg_stime);
4652     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4653     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4654     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4655     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4656     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4657     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4658     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4659     unlock_user_struct(target_md, target_addr, 0);
4660     return 0;
4661 }
4662 
4663 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4664                                                struct msqid_ds *host_md)
4665 {
4666     struct target_msqid_ds *target_md;
4667 
4668     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4669         return -TARGET_EFAULT;
4670     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4671         return -TARGET_EFAULT;
4672     target_md->msg_stime = tswapal(host_md->msg_stime);
4673     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4674     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4675     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4676     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4677     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4678     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4679     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4680     unlock_user_struct(target_md, target_addr, 1);
4681     return 0;
4682 }
4683 
4684 struct target_msginfo {
4685     int msgpool;
4686     int msgmap;
4687     int msgmax;
4688     int msgmnb;
4689     int msgmni;
4690     int msgssz;
4691     int msgtql;
4692     unsigned short int msgseg;
4693 };
4694 
4695 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4696                                               struct msginfo *host_msginfo)
4697 {
4698     struct target_msginfo *target_msginfo;
4699     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4700         return -TARGET_EFAULT;
4701     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4702     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4703     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4704     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4705     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4706     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4707     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4708     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4709     unlock_user_struct(target_msginfo, target_addr, 1);
4710     return 0;
4711 }
4712 
4713 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4714 {
4715     struct msqid_ds dsarg;
4716     struct msginfo msginfo;
4717     abi_long ret = -TARGET_EINVAL;
4718 
4719     cmd &= 0xff;
4720 
4721     switch (cmd) {
4722     case IPC_STAT:
4723     case IPC_SET:
4724     case MSG_STAT:
4725         if (target_to_host_msqid_ds(&dsarg,ptr))
4726             return -TARGET_EFAULT;
4727         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4728         if (host_to_target_msqid_ds(ptr,&dsarg))
4729             return -TARGET_EFAULT;
4730         break;
4731     case IPC_RMID:
4732         ret = get_errno(msgctl(msgid, cmd, NULL));
4733         break;
4734     case IPC_INFO:
4735     case MSG_INFO:
4736         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4737         if (host_to_target_msginfo(ptr, &msginfo))
4738             return -TARGET_EFAULT;
4739         break;
4740     }
4741 
4742     return ret;
4743 }
4744 
4745 struct target_msgbuf {
4746     abi_long mtype;
4747     char	mtext[1];
4748 };
4749 
4750 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4751                                  ssize_t msgsz, int msgflg)
4752 {
4753     struct target_msgbuf *target_mb;
4754     struct msgbuf *host_mb;
4755     abi_long ret = 0;
4756 
4757     if (msgsz < 0) {
4758         return -TARGET_EINVAL;
4759     }
4760 
4761     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4762         return -TARGET_EFAULT;
4763     host_mb = g_try_malloc(msgsz + sizeof(long));
4764     if (!host_mb) {
4765         unlock_user_struct(target_mb, msgp, 0);
4766         return -TARGET_ENOMEM;
4767     }
4768     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4769     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4770     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4771     g_free(host_mb);
4772     unlock_user_struct(target_mb, msgp, 0);
4773 
4774     return ret;
4775 }
4776 
4777 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4778                                  ssize_t msgsz, abi_long msgtyp,
4779                                  int msgflg)
4780 {
4781     struct target_msgbuf *target_mb;
4782     char *target_mtext;
4783     struct msgbuf *host_mb;
4784     abi_long ret = 0;
4785 
4786     if (msgsz < 0) {
4787         return -TARGET_EINVAL;
4788     }
4789 
4790     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4791         return -TARGET_EFAULT;
4792 
4793     host_mb = g_try_malloc(msgsz + sizeof(long));
4794     if (!host_mb) {
4795         ret = -TARGET_ENOMEM;
4796         goto end;
4797     }
4798     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4799 
4800     if (ret > 0) {
4801         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4802         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4803         if (!target_mtext) {
4804             ret = -TARGET_EFAULT;
4805             goto end;
4806         }
4807         memcpy(target_mb->mtext, host_mb->mtext, ret);
4808         unlock_user(target_mtext, target_mtext_addr, ret);
4809     }
4810 
4811     target_mb->mtype = tswapal(host_mb->mtype);
4812 
4813 end:
4814     if (target_mb)
4815         unlock_user_struct(target_mb, msgp, 1);
4816     g_free(host_mb);
4817     return ret;
4818 }
4819 
4820 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4821                                                abi_ulong target_addr)
4822 {
4823     struct target_shmid_ds *target_sd;
4824 
4825     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4826         return -TARGET_EFAULT;
4827     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4828         return -TARGET_EFAULT;
4829     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4830     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4831     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4832     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4833     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4834     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4835     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4836     unlock_user_struct(target_sd, target_addr, 0);
4837     return 0;
4838 }
4839 
4840 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4841                                                struct shmid_ds *host_sd)
4842 {
4843     struct target_shmid_ds *target_sd;
4844 
4845     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4846         return -TARGET_EFAULT;
4847     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4848         return -TARGET_EFAULT;
4849     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4850     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4851     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4852     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4853     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4854     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4855     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4856     unlock_user_struct(target_sd, target_addr, 1);
4857     return 0;
4858 }
4859 
4860 struct  target_shminfo {
4861     abi_ulong shmmax;
4862     abi_ulong shmmin;
4863     abi_ulong shmmni;
4864     abi_ulong shmseg;
4865     abi_ulong shmall;
4866 };
4867 
4868 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4869                                               struct shminfo *host_shminfo)
4870 {
4871     struct target_shminfo *target_shminfo;
4872     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4873         return -TARGET_EFAULT;
4874     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4875     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4876     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4877     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4878     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4879     unlock_user_struct(target_shminfo, target_addr, 1);
4880     return 0;
4881 }
4882 
4883 struct target_shm_info {
4884     int used_ids;
4885     abi_ulong shm_tot;
4886     abi_ulong shm_rss;
4887     abi_ulong shm_swp;
4888     abi_ulong swap_attempts;
4889     abi_ulong swap_successes;
4890 };
4891 
4892 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4893                                                struct shm_info *host_shm_info)
4894 {
4895     struct target_shm_info *target_shm_info;
4896     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4897         return -TARGET_EFAULT;
4898     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4899     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4900     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4901     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4902     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4903     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4904     unlock_user_struct(target_shm_info, target_addr, 1);
4905     return 0;
4906 }
4907 
4908 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4909 {
4910     struct shmid_ds dsarg;
4911     struct shminfo shminfo;
4912     struct shm_info shm_info;
4913     abi_long ret = -TARGET_EINVAL;
4914 
4915     cmd &= 0xff;
4916 
4917     switch(cmd) {
4918     case IPC_STAT:
4919     case IPC_SET:
4920     case SHM_STAT:
4921         if (target_to_host_shmid_ds(&dsarg, buf))
4922             return -TARGET_EFAULT;
4923         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4924         if (host_to_target_shmid_ds(buf, &dsarg))
4925             return -TARGET_EFAULT;
4926         break;
4927     case IPC_INFO:
4928         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4929         if (host_to_target_shminfo(buf, &shminfo))
4930             return -TARGET_EFAULT;
4931         break;
4932     case SHM_INFO:
4933         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4934         if (host_to_target_shm_info(buf, &shm_info))
4935             return -TARGET_EFAULT;
4936         break;
4937     case IPC_RMID:
4938     case SHM_LOCK:
4939     case SHM_UNLOCK:
4940         ret = get_errno(shmctl(shmid, cmd, NULL));
4941         break;
4942     }
4943 
4944     return ret;
4945 }
4946 
4947 #ifndef TARGET_FORCE_SHMLBA
4948 /* For most architectures, SHMLBA is the same as the page size;
4949  * some architectures have larger values, in which case they should
4950  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4951  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4952  * and defining its own value for SHMLBA.
4953  *
4954  * The kernel also permits SHMLBA to be set by the architecture to a
4955  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4956  * this means that addresses are rounded to the large size if
4957  * SHM_RND is set but addresses not aligned to that size are not rejected
4958  * as long as they are at least page-aligned. Since the only architecture
4959  * which uses this is ia64 this code doesn't provide for that oddity.
4960  */
4961 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4962 {
4963     return TARGET_PAGE_SIZE;
4964 }
4965 #endif
4966 
4967 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4968                                  int shmid, abi_ulong shmaddr, int shmflg)
4969 {
4970     abi_long raddr;
4971     void *host_raddr;
4972     struct shmid_ds shm_info;
4973     int i,ret;
4974     abi_ulong shmlba;
4975 
4976     /* find out the length of the shared memory segment */
4977     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4978     if (is_error(ret)) {
4979         /* can't get length, bail out */
4980         return ret;
4981     }
4982 
4983     shmlba = target_shmlba(cpu_env);
4984 
4985     if (shmaddr & (shmlba - 1)) {
4986         if (shmflg & SHM_RND) {
4987             shmaddr &= ~(shmlba - 1);
4988         } else {
4989             return -TARGET_EINVAL;
4990         }
4991     }
4992     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4993         return -TARGET_EINVAL;
4994     }
4995 
4996     mmap_lock();
4997 
4998     if (shmaddr)
4999         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
5000     else {
5001         abi_ulong mmap_start;
5002 
5003         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
5004 
5005         if (mmap_start == -1) {
5006             errno = ENOMEM;
5007             host_raddr = (void *)-1;
5008         } else
5009             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
5010     }
5011 
5012     if (host_raddr == (void *)-1) {
5013         mmap_unlock();
5014         return get_errno((long)host_raddr);
5015     }
5016     raddr=h2g((unsigned long)host_raddr);
5017 
5018     page_set_flags(raddr, raddr + shm_info.shm_segsz,
5019                    PAGE_VALID | PAGE_READ |
5020                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
5021 
5022     for (i = 0; i < N_SHM_REGIONS; i++) {
5023         if (!shm_regions[i].in_use) {
5024             shm_regions[i].in_use = true;
5025             shm_regions[i].start = raddr;
5026             shm_regions[i].size = shm_info.shm_segsz;
5027             break;
5028         }
5029     }
5030 
5031     mmap_unlock();
5032     return raddr;
5033 
5034 }
5035 
5036 static inline abi_long do_shmdt(abi_ulong shmaddr)
5037 {
5038     int i;
5039     abi_long rv;
5040 
5041     mmap_lock();
5042 
5043     for (i = 0; i < N_SHM_REGIONS; ++i) {
5044         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
5045             shm_regions[i].in_use = false;
5046             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
5047             break;
5048         }
5049     }
5050     rv = get_errno(shmdt(g2h(shmaddr)));
5051 
5052     mmap_unlock();
5053 
5054     return rv;
5055 }
5056 
5057 #ifdef TARGET_NR_ipc
5058 /* ??? This only works with linear mappings.  */
5059 /* do_ipc() must return target values and target errnos. */
5060 static abi_long do_ipc(CPUArchState *cpu_env,
5061                        unsigned int call, abi_long first,
5062                        abi_long second, abi_long third,
5063                        abi_long ptr, abi_long fifth)
5064 {
5065     int version;
5066     abi_long ret = 0;
5067 
5068     version = call >> 16;
5069     call &= 0xffff;
5070 
5071     switch (call) {
5072     case IPCOP_semop:
5073         ret = do_semop(first, ptr, second);
5074         break;
5075 
5076     case IPCOP_semget:
5077         ret = get_errno(semget(first, second, third));
5078         break;
5079 
5080     case IPCOP_semctl: {
5081         /* The semun argument to semctl is passed by value, so dereference the
5082          * ptr argument. */
5083         abi_ulong atptr;
5084         get_user_ual(atptr, ptr);
5085         ret = do_semctl(first, second, third, atptr);
5086         break;
5087     }
5088 
5089     case IPCOP_msgget:
5090         ret = get_errno(msgget(first, second));
5091         break;
5092 
5093     case IPCOP_msgsnd:
5094         ret = do_msgsnd(first, ptr, second, third);
5095         break;
5096 
5097     case IPCOP_msgctl:
5098         ret = do_msgctl(first, second, ptr);
5099         break;
5100 
5101     case IPCOP_msgrcv:
5102         switch (version) {
5103         case 0:
5104             {
5105                 struct target_ipc_kludge {
5106                     abi_long msgp;
5107                     abi_long msgtyp;
5108                 } *tmp;
5109 
5110                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5111                     ret = -TARGET_EFAULT;
5112                     break;
5113                 }
5114 
5115                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5116 
5117                 unlock_user_struct(tmp, ptr, 0);
5118                 break;
5119             }
5120         default:
5121             ret = do_msgrcv(first, ptr, second, fifth, third);
5122         }
5123         break;
5124 
5125     case IPCOP_shmat:
5126         switch (version) {
5127         default:
5128         {
5129             abi_ulong raddr;
5130             raddr = do_shmat(cpu_env, first, ptr, second);
5131             if (is_error(raddr))
5132                 return get_errno(raddr);
5133             if (put_user_ual(raddr, third))
5134                 return -TARGET_EFAULT;
5135             break;
5136         }
5137         case 1:
5138             ret = -TARGET_EINVAL;
5139             break;
5140         }
5141 	break;
5142     case IPCOP_shmdt:
5143         ret = do_shmdt(ptr);
5144 	break;
5145 
5146     case IPCOP_shmget:
5147 	/* IPC_* flag values are the same on all linux platforms */
5148 	ret = get_errno(shmget(first, second, third));
5149 	break;
5150 
5151 	/* IPC_* and SHM_* command values are the same on all linux platforms */
5152     case IPCOP_shmctl:
5153         ret = do_shmctl(first, second, ptr);
5154         break;
5155     default:
5156 	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5157 	ret = -TARGET_ENOSYS;
5158 	break;
5159     }
5160     return ret;
5161 }
5162 #endif
5163 
5164 /* kernel structure types definitions */
5165 
5166 #define STRUCT(name, ...) STRUCT_ ## name,
5167 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5168 enum {
5169 #include "syscall_types.h"
5170 STRUCT_MAX
5171 };
5172 #undef STRUCT
5173 #undef STRUCT_SPECIAL
5174 
5175 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
5176 #define STRUCT_SPECIAL(name)
5177 #include "syscall_types.h"
5178 #undef STRUCT
5179 #undef STRUCT_SPECIAL
5180 
5181 typedef struct IOCTLEntry IOCTLEntry;
5182 
5183 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5184                              int fd, int cmd, abi_long arg);
5185 
5186 struct IOCTLEntry {
5187     int target_cmd;
5188     unsigned int host_cmd;
5189     const char *name;
5190     int access;
5191     do_ioctl_fn *do_ioctl;
5192     const argtype arg_type[5];
5193 };
5194 
5195 #define IOC_R 0x0001
5196 #define IOC_W 0x0002
5197 #define IOC_RW (IOC_R | IOC_W)
5198 
5199 #define MAX_STRUCT_SIZE 4096
5200 
5201 #ifdef CONFIG_FIEMAP
5202 /* So fiemap access checks don't overflow on 32 bit systems.
5203  * This is very slightly smaller than the limit imposed by
5204  * the underlying kernel.
5205  */
5206 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
5207                             / sizeof(struct fiemap_extent))
5208 
5209 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5210                                        int fd, int cmd, abi_long arg)
5211 {
5212     /* The parameter for this ioctl is a struct fiemap followed
5213      * by an array of struct fiemap_extent whose size is set
5214      * in fiemap->fm_extent_count. The array is filled in by the
5215      * ioctl.
5216      */
5217     int target_size_in, target_size_out;
5218     struct fiemap *fm;
5219     const argtype *arg_type = ie->arg_type;
5220     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5221     void *argptr, *p;
5222     abi_long ret;
5223     int i, extent_size = thunk_type_size(extent_arg_type, 0);
5224     uint32_t outbufsz;
5225     int free_fm = 0;
5226 
5227     assert(arg_type[0] == TYPE_PTR);
5228     assert(ie->access == IOC_RW);
5229     arg_type++;
5230     target_size_in = thunk_type_size(arg_type, 0);
5231     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5232     if (!argptr) {
5233         return -TARGET_EFAULT;
5234     }
5235     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5236     unlock_user(argptr, arg, 0);
5237     fm = (struct fiemap *)buf_temp;
5238     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5239         return -TARGET_EINVAL;
5240     }
5241 
5242     outbufsz = sizeof (*fm) +
5243         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5244 
5245     if (outbufsz > MAX_STRUCT_SIZE) {
5246         /* We can't fit all the extents into the fixed size buffer.
5247          * Allocate one that is large enough and use it instead.
5248          */
5249         fm = g_try_malloc(outbufsz);
5250         if (!fm) {
5251             return -TARGET_ENOMEM;
5252         }
5253         memcpy(fm, buf_temp, sizeof(struct fiemap));
5254         free_fm = 1;
5255     }
5256     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5257     if (!is_error(ret)) {
5258         target_size_out = target_size_in;
5259         /* An extent_count of 0 means we were only counting the extents
5260          * so there are no structs to copy
5261          */
5262         if (fm->fm_extent_count != 0) {
5263             target_size_out += fm->fm_mapped_extents * extent_size;
5264         }
5265         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5266         if (!argptr) {
5267             ret = -TARGET_EFAULT;
5268         } else {
5269             /* Convert the struct fiemap */
5270             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5271             if (fm->fm_extent_count != 0) {
5272                 p = argptr + target_size_in;
5273                 /* ...and then all the struct fiemap_extents */
5274                 for (i = 0; i < fm->fm_mapped_extents; i++) {
5275                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5276                                   THUNK_TARGET);
5277                     p += extent_size;
5278                 }
5279             }
5280             unlock_user(argptr, arg, target_size_out);
5281         }
5282     }
5283     if (free_fm) {
5284         g_free(fm);
5285     }
5286     return ret;
5287 }
5288 #endif
5289 
5290 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5291                                 int fd, int cmd, abi_long arg)
5292 {
5293     const argtype *arg_type = ie->arg_type;
5294     int target_size;
5295     void *argptr;
5296     int ret;
5297     struct ifconf *host_ifconf;
5298     uint32_t outbufsz;
5299     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5300     int target_ifreq_size;
5301     int nb_ifreq;
5302     int free_buf = 0;
5303     int i;
5304     int target_ifc_len;
5305     abi_long target_ifc_buf;
5306     int host_ifc_len;
5307     char *host_ifc_buf;
5308 
5309     assert(arg_type[0] == TYPE_PTR);
5310     assert(ie->access == IOC_RW);
5311 
5312     arg_type++;
5313     target_size = thunk_type_size(arg_type, 0);
5314 
5315     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5316     if (!argptr)
5317         return -TARGET_EFAULT;
5318     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5319     unlock_user(argptr, arg, 0);
5320 
5321     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5322     target_ifc_len = host_ifconf->ifc_len;
5323     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5324 
5325     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5326     nb_ifreq = target_ifc_len / target_ifreq_size;
5327     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5328 
5329     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5330     if (outbufsz > MAX_STRUCT_SIZE) {
5331         /* We can't fit all the extents into the fixed size buffer.
5332          * Allocate one that is large enough and use it instead.
5333          */
5334         host_ifconf = malloc(outbufsz);
5335         if (!host_ifconf) {
5336             return -TARGET_ENOMEM;
5337         }
5338         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5339         free_buf = 1;
5340     }
5341     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5342 
5343     host_ifconf->ifc_len = host_ifc_len;
5344     host_ifconf->ifc_buf = host_ifc_buf;
5345 
5346     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5347     if (!is_error(ret)) {
5348 	/* convert host ifc_len to target ifc_len */
5349 
5350         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5351         target_ifc_len = nb_ifreq * target_ifreq_size;
5352         host_ifconf->ifc_len = target_ifc_len;
5353 
5354 	/* restore target ifc_buf */
5355 
5356         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5357 
5358 	/* copy struct ifconf to target user */
5359 
5360         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5361         if (!argptr)
5362             return -TARGET_EFAULT;
5363         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5364         unlock_user(argptr, arg, target_size);
5365 
5366 	/* copy ifreq[] to target user */
5367 
5368         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5369         for (i = 0; i < nb_ifreq ; i++) {
5370             thunk_convert(argptr + i * target_ifreq_size,
5371                           host_ifc_buf + i * sizeof(struct ifreq),
5372                           ifreq_arg_type, THUNK_TARGET);
5373         }
5374         unlock_user(argptr, target_ifc_buf, target_ifc_len);
5375     }
5376 
5377     if (free_buf) {
5378         free(host_ifconf);
5379     }
5380 
5381     return ret;
5382 }
5383 
5384 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5385                             int cmd, abi_long arg)
5386 {
5387     void *argptr;
5388     struct dm_ioctl *host_dm;
5389     abi_long guest_data;
5390     uint32_t guest_data_size;
5391     int target_size;
5392     const argtype *arg_type = ie->arg_type;
5393     abi_long ret;
5394     void *big_buf = NULL;
5395     char *host_data;
5396 
5397     arg_type++;
5398     target_size = thunk_type_size(arg_type, 0);
5399     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5400     if (!argptr) {
5401         ret = -TARGET_EFAULT;
5402         goto out;
5403     }
5404     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5405     unlock_user(argptr, arg, 0);
5406 
5407     /* buf_temp is too small, so fetch things into a bigger buffer */
5408     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5409     memcpy(big_buf, buf_temp, target_size);
5410     buf_temp = big_buf;
5411     host_dm = big_buf;
5412 
5413     guest_data = arg + host_dm->data_start;
5414     if ((guest_data - arg) < 0) {
5415         ret = -TARGET_EINVAL;
5416         goto out;
5417     }
5418     guest_data_size = host_dm->data_size - host_dm->data_start;
5419     host_data = (char*)host_dm + host_dm->data_start;
5420 
5421     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5422     if (!argptr) {
5423         ret = -TARGET_EFAULT;
5424         goto out;
5425     }
5426 
5427     switch (ie->host_cmd) {
5428     case DM_REMOVE_ALL:
5429     case DM_LIST_DEVICES:
5430     case DM_DEV_CREATE:
5431     case DM_DEV_REMOVE:
5432     case DM_DEV_SUSPEND:
5433     case DM_DEV_STATUS:
5434     case DM_DEV_WAIT:
5435     case DM_TABLE_STATUS:
5436     case DM_TABLE_CLEAR:
5437     case DM_TABLE_DEPS:
5438     case DM_LIST_VERSIONS:
5439         /* no input data */
5440         break;
5441     case DM_DEV_RENAME:
5442     case DM_DEV_SET_GEOMETRY:
5443         /* data contains only strings */
5444         memcpy(host_data, argptr, guest_data_size);
5445         break;
5446     case DM_TARGET_MSG:
5447         memcpy(host_data, argptr, guest_data_size);
5448         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5449         break;
5450     case DM_TABLE_LOAD:
5451     {
5452         void *gspec = argptr;
5453         void *cur_data = host_data;
5454         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5455         int spec_size = thunk_type_size(arg_type, 0);
5456         int i;
5457 
5458         for (i = 0; i < host_dm->target_count; i++) {
5459             struct dm_target_spec *spec = cur_data;
5460             uint32_t next;
5461             int slen;
5462 
5463             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5464             slen = strlen((char*)gspec + spec_size) + 1;
5465             next = spec->next;
5466             spec->next = sizeof(*spec) + slen;
5467             strcpy((char*)&spec[1], gspec + spec_size);
5468             gspec += next;
5469             cur_data += spec->next;
5470         }
5471         break;
5472     }
5473     default:
5474         ret = -TARGET_EINVAL;
5475         unlock_user(argptr, guest_data, 0);
5476         goto out;
5477     }
5478     unlock_user(argptr, guest_data, 0);
5479 
5480     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5481     if (!is_error(ret)) {
5482         guest_data = arg + host_dm->data_start;
5483         guest_data_size = host_dm->data_size - host_dm->data_start;
5484         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5485         switch (ie->host_cmd) {
5486         case DM_REMOVE_ALL:
5487         case DM_DEV_CREATE:
5488         case DM_DEV_REMOVE:
5489         case DM_DEV_RENAME:
5490         case DM_DEV_SUSPEND:
5491         case DM_DEV_STATUS:
5492         case DM_TABLE_LOAD:
5493         case DM_TABLE_CLEAR:
5494         case DM_TARGET_MSG:
5495         case DM_DEV_SET_GEOMETRY:
5496             /* no return data */
5497             break;
5498         case DM_LIST_DEVICES:
5499         {
5500             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5501             uint32_t remaining_data = guest_data_size;
5502             void *cur_data = argptr;
5503             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5504             int nl_size = 12; /* can't use thunk_size due to alignment */
5505 
5506             while (1) {
5507                 uint32_t next = nl->next;
5508                 if (next) {
5509                     nl->next = nl_size + (strlen(nl->name) + 1);
5510                 }
5511                 if (remaining_data < nl->next) {
5512                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5513                     break;
5514                 }
5515                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5516                 strcpy(cur_data + nl_size, nl->name);
5517                 cur_data += nl->next;
5518                 remaining_data -= nl->next;
5519                 if (!next) {
5520                     break;
5521                 }
5522                 nl = (void*)nl + next;
5523             }
5524             break;
5525         }
5526         case DM_DEV_WAIT:
5527         case DM_TABLE_STATUS:
5528         {
5529             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5530             void *cur_data = argptr;
5531             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5532             int spec_size = thunk_type_size(arg_type, 0);
5533             int i;
5534 
5535             for (i = 0; i < host_dm->target_count; i++) {
5536                 uint32_t next = spec->next;
5537                 int slen = strlen((char*)&spec[1]) + 1;
5538                 spec->next = (cur_data - argptr) + spec_size + slen;
5539                 if (guest_data_size < spec->next) {
5540                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5541                     break;
5542                 }
5543                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5544                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5545                 cur_data = argptr + spec->next;
5546                 spec = (void*)host_dm + host_dm->data_start + next;
5547             }
5548             break;
5549         }
5550         case DM_TABLE_DEPS:
5551         {
5552             void *hdata = (void*)host_dm + host_dm->data_start;
5553             int count = *(uint32_t*)hdata;
5554             uint64_t *hdev = hdata + 8;
5555             uint64_t *gdev = argptr + 8;
5556             int i;
5557 
5558             *(uint32_t*)argptr = tswap32(count);
5559             for (i = 0; i < count; i++) {
5560                 *gdev = tswap64(*hdev);
5561                 gdev++;
5562                 hdev++;
5563             }
5564             break;
5565         }
5566         case DM_LIST_VERSIONS:
5567         {
5568             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5569             uint32_t remaining_data = guest_data_size;
5570             void *cur_data = argptr;
5571             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5572             int vers_size = thunk_type_size(arg_type, 0);
5573 
5574             while (1) {
5575                 uint32_t next = vers->next;
5576                 if (next) {
5577                     vers->next = vers_size + (strlen(vers->name) + 1);
5578                 }
5579                 if (remaining_data < vers->next) {
5580                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5581                     break;
5582                 }
5583                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5584                 strcpy(cur_data + vers_size, vers->name);
5585                 cur_data += vers->next;
5586                 remaining_data -= vers->next;
5587                 if (!next) {
5588                     break;
5589                 }
5590                 vers = (void*)vers + next;
5591             }
5592             break;
5593         }
5594         default:
5595             unlock_user(argptr, guest_data, 0);
5596             ret = -TARGET_EINVAL;
5597             goto out;
5598         }
5599         unlock_user(argptr, guest_data, guest_data_size);
5600 
5601         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5602         if (!argptr) {
5603             ret = -TARGET_EFAULT;
5604             goto out;
5605         }
5606         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5607         unlock_user(argptr, arg, target_size);
5608     }
5609 out:
5610     g_free(big_buf);
5611     return ret;
5612 }
5613 
5614 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5615                                int cmd, abi_long arg)
5616 {
5617     void *argptr;
5618     int target_size;
5619     const argtype *arg_type = ie->arg_type;
5620     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5621     abi_long ret;
5622 
5623     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5624     struct blkpg_partition host_part;
5625 
5626     /* Read and convert blkpg */
5627     arg_type++;
5628     target_size = thunk_type_size(arg_type, 0);
5629     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5630     if (!argptr) {
5631         ret = -TARGET_EFAULT;
5632         goto out;
5633     }
5634     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5635     unlock_user(argptr, arg, 0);
5636 
5637     switch (host_blkpg->op) {
5638     case BLKPG_ADD_PARTITION:
5639     case BLKPG_DEL_PARTITION:
5640         /* payload is struct blkpg_partition */
5641         break;
5642     default:
5643         /* Unknown opcode */
5644         ret = -TARGET_EINVAL;
5645         goto out;
5646     }
5647 
5648     /* Read and convert blkpg->data */
5649     arg = (abi_long)(uintptr_t)host_blkpg->data;
5650     target_size = thunk_type_size(part_arg_type, 0);
5651     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5652     if (!argptr) {
5653         ret = -TARGET_EFAULT;
5654         goto out;
5655     }
5656     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5657     unlock_user(argptr, arg, 0);
5658 
5659     /* Swizzle the data pointer to our local copy and call! */
5660     host_blkpg->data = &host_part;
5661     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5662 
5663 out:
5664     return ret;
5665 }
5666 
5667 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5668                                 int fd, int cmd, abi_long arg)
5669 {
5670     const argtype *arg_type = ie->arg_type;
5671     const StructEntry *se;
5672     const argtype *field_types;
5673     const int *dst_offsets, *src_offsets;
5674     int target_size;
5675     void *argptr;
5676     abi_ulong *target_rt_dev_ptr;
5677     unsigned long *host_rt_dev_ptr;
5678     abi_long ret;
5679     int i;
5680 
5681     assert(ie->access == IOC_W);
5682     assert(*arg_type == TYPE_PTR);
5683     arg_type++;
5684     assert(*arg_type == TYPE_STRUCT);
5685     target_size = thunk_type_size(arg_type, 0);
5686     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5687     if (!argptr) {
5688         return -TARGET_EFAULT;
5689     }
5690     arg_type++;
5691     assert(*arg_type == (int)STRUCT_rtentry);
5692     se = struct_entries + *arg_type++;
5693     assert(se->convert[0] == NULL);
5694     /* convert struct here to be able to catch rt_dev string */
5695     field_types = se->field_types;
5696     dst_offsets = se->field_offsets[THUNK_HOST];
5697     src_offsets = se->field_offsets[THUNK_TARGET];
5698     for (i = 0; i < se->nb_fields; i++) {
5699         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5700             assert(*field_types == TYPE_PTRVOID);
5701             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5702             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5703             if (*target_rt_dev_ptr != 0) {
5704                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5705                                                   tswapal(*target_rt_dev_ptr));
5706                 if (!*host_rt_dev_ptr) {
5707                     unlock_user(argptr, arg, 0);
5708                     return -TARGET_EFAULT;
5709                 }
5710             } else {
5711                 *host_rt_dev_ptr = 0;
5712             }
5713             field_types++;
5714             continue;
5715         }
5716         field_types = thunk_convert(buf_temp + dst_offsets[i],
5717                                     argptr + src_offsets[i],
5718                                     field_types, THUNK_HOST);
5719     }
5720     unlock_user(argptr, arg, 0);
5721 
5722     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5723     if (*host_rt_dev_ptr != 0) {
5724         unlock_user((void *)*host_rt_dev_ptr,
5725                     *target_rt_dev_ptr, 0);
5726     }
5727     return ret;
5728 }
5729 
5730 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5731                                      int fd, int cmd, abi_long arg)
5732 {
5733     int sig = target_to_host_signal(arg);
5734     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5735 }
5736 
5737 #ifdef TIOCGPTPEER
5738 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5739                                      int fd, int cmd, abi_long arg)
5740 {
5741     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5742     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5743 }
5744 #endif
5745 
5746 static IOCTLEntry ioctl_entries[] = {
5747 #define IOCTL(cmd, access, ...) \
5748     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5749 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5750     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5751 #define IOCTL_IGNORE(cmd) \
5752     { TARGET_ ## cmd, 0, #cmd },
5753 #include "ioctls.h"
5754     { 0, 0, },
5755 };
5756 
5757 /* ??? Implement proper locking for ioctls.  */
5758 /* do_ioctl() Must return target values and target errnos. */
5759 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5760 {
5761     const IOCTLEntry *ie;
5762     const argtype *arg_type;
5763     abi_long ret;
5764     uint8_t buf_temp[MAX_STRUCT_SIZE];
5765     int target_size;
5766     void *argptr;
5767 
5768     ie = ioctl_entries;
5769     for(;;) {
5770         if (ie->target_cmd == 0) {
5771             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5772             return -TARGET_ENOSYS;
5773         }
5774         if (ie->target_cmd == cmd)
5775             break;
5776         ie++;
5777     }
5778     arg_type = ie->arg_type;
5779 #if defined(DEBUG)
5780     gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5781 #endif
5782     if (ie->do_ioctl) {
5783         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5784     } else if (!ie->host_cmd) {
5785         /* Some architectures define BSD ioctls in their headers
5786            that are not implemented in Linux.  */
5787         return -TARGET_ENOSYS;
5788     }
5789 
5790     switch(arg_type[0]) {
5791     case TYPE_NULL:
5792         /* no argument */
5793         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5794         break;
5795     case TYPE_PTRVOID:
5796     case TYPE_INT:
5797         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5798         break;
5799     case TYPE_PTR:
5800         arg_type++;
5801         target_size = thunk_type_size(arg_type, 0);
5802         switch(ie->access) {
5803         case IOC_R:
5804             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5805             if (!is_error(ret)) {
5806                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5807                 if (!argptr)
5808                     return -TARGET_EFAULT;
5809                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5810                 unlock_user(argptr, arg, target_size);
5811             }
5812             break;
5813         case IOC_W:
5814             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5815             if (!argptr)
5816                 return -TARGET_EFAULT;
5817             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5818             unlock_user(argptr, arg, 0);
5819             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5820             break;
5821         default:
5822         case IOC_RW:
5823             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5824             if (!argptr)
5825                 return -TARGET_EFAULT;
5826             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5827             unlock_user(argptr, arg, 0);
5828             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5829             if (!is_error(ret)) {
5830                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5831                 if (!argptr)
5832                     return -TARGET_EFAULT;
5833                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5834                 unlock_user(argptr, arg, target_size);
5835             }
5836             break;
5837         }
5838         break;
5839     default:
5840         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5841                  (long)cmd, arg_type[0]);
5842         ret = -TARGET_ENOSYS;
5843         break;
5844     }
5845     return ret;
5846 }
5847 
5848 static const bitmask_transtbl iflag_tbl[] = {
5849         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5850         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5851         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5852         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5853         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5854         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5855         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5856         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5857         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5858         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5859         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5860         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5861         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5862         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5863         { 0, 0, 0, 0 }
5864 };
5865 
5866 static const bitmask_transtbl oflag_tbl[] = {
5867 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5868 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5869 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5870 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5871 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5872 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5873 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5874 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5875 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5876 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5877 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5878 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5879 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5880 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5881 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5882 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5883 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5884 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5885 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5886 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5887 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5888 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5889 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5890 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5891 	{ 0, 0, 0, 0 }
5892 };
5893 
5894 static const bitmask_transtbl cflag_tbl[] = {
5895 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5896 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5897 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5898 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5899 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5900 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5901 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5902 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5903 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5904 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5905 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5906 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5907 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5908 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5909 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5910 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5911 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5912 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5913 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5914 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5915 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5916 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5917 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5918 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5919 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5920 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5921 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5922 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5923 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5924 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5925 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5926 	{ 0, 0, 0, 0 }
5927 };
5928 
5929 static const bitmask_transtbl lflag_tbl[] = {
5930 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5931 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5932 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5933 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5934 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5935 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5936 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5937 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5938 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5939 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5940 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5941 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5942 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5943 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5944 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5945 	{ 0, 0, 0, 0 }
5946 };
5947 
5948 static void target_to_host_termios (void *dst, const void *src)
5949 {
5950     struct host_termios *host = dst;
5951     const struct target_termios *target = src;
5952 
5953     host->c_iflag =
5954         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5955     host->c_oflag =
5956         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5957     host->c_cflag =
5958         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5959     host->c_lflag =
5960         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5961     host->c_line = target->c_line;
5962 
5963     memset(host->c_cc, 0, sizeof(host->c_cc));
5964     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5965     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5966     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5967     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5968     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5969     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5970     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5971     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5972     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5973     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5974     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5975     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5976     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5977     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5978     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5979     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5980     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5981 }
5982 
5983 static void host_to_target_termios (void *dst, const void *src)
5984 {
5985     struct target_termios *target = dst;
5986     const struct host_termios *host = src;
5987 
5988     target->c_iflag =
5989         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5990     target->c_oflag =
5991         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5992     target->c_cflag =
5993         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5994     target->c_lflag =
5995         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5996     target->c_line = host->c_line;
5997 
5998     memset(target->c_cc, 0, sizeof(target->c_cc));
5999     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6000     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6001     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6002     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6003     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6004     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6005     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6006     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6007     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6008     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6009     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6010     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6011     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6012     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6013     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6014     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6015     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6016 }
6017 
6018 static const StructEntry struct_termios_def = {
6019     .convert = { host_to_target_termios, target_to_host_termios },
6020     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6021     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6022 };
6023 
6024 static bitmask_transtbl mmap_flags_tbl[] = {
6025     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6026     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6027     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6028     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6029       MAP_ANONYMOUS, MAP_ANONYMOUS },
6030     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6031       MAP_GROWSDOWN, MAP_GROWSDOWN },
6032     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6033       MAP_DENYWRITE, MAP_DENYWRITE },
6034     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6035       MAP_EXECUTABLE, MAP_EXECUTABLE },
6036     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6037     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6038       MAP_NORESERVE, MAP_NORESERVE },
6039     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6040     /* MAP_STACK had been ignored by the kernel for quite some time.
6041        Recognize it for the target insofar as we do not want to pass
6042        it through to the host.  */
6043     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6044     { 0, 0, 0, 0 }
6045 };
6046 
6047 #if defined(TARGET_I386)
6048 
6049 /* NOTE: there is really one LDT for all the threads */
6050 static uint8_t *ldt_table;
6051 
6052 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6053 {
6054     int size;
6055     void *p;
6056 
6057     if (!ldt_table)
6058         return 0;
6059     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6060     if (size > bytecount)
6061         size = bytecount;
6062     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6063     if (!p)
6064         return -TARGET_EFAULT;
6065     /* ??? Should this by byteswapped?  */
6066     memcpy(p, ldt_table, size);
6067     unlock_user(p, ptr, size);
6068     return size;
6069 }
6070 
6071 /* XXX: add locking support */
6072 static abi_long write_ldt(CPUX86State *env,
6073                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6074 {
6075     struct target_modify_ldt_ldt_s ldt_info;
6076     struct target_modify_ldt_ldt_s *target_ldt_info;
6077     int seg_32bit, contents, read_exec_only, limit_in_pages;
6078     int seg_not_present, useable, lm;
6079     uint32_t *lp, entry_1, entry_2;
6080 
6081     if (bytecount != sizeof(ldt_info))
6082         return -TARGET_EINVAL;
6083     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6084         return -TARGET_EFAULT;
6085     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6086     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6087     ldt_info.limit = tswap32(target_ldt_info->limit);
6088     ldt_info.flags = tswap32(target_ldt_info->flags);
6089     unlock_user_struct(target_ldt_info, ptr, 0);
6090 
6091     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6092         return -TARGET_EINVAL;
6093     seg_32bit = ldt_info.flags & 1;
6094     contents = (ldt_info.flags >> 1) & 3;
6095     read_exec_only = (ldt_info.flags >> 3) & 1;
6096     limit_in_pages = (ldt_info.flags >> 4) & 1;
6097     seg_not_present = (ldt_info.flags >> 5) & 1;
6098     useable = (ldt_info.flags >> 6) & 1;
6099 #ifdef TARGET_ABI32
6100     lm = 0;
6101 #else
6102     lm = (ldt_info.flags >> 7) & 1;
6103 #endif
6104     if (contents == 3) {
6105         if (oldmode)
6106             return -TARGET_EINVAL;
6107         if (seg_not_present == 0)
6108             return -TARGET_EINVAL;
6109     }
6110     /* allocate the LDT */
6111     if (!ldt_table) {
6112         env->ldt.base = target_mmap(0,
6113                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6114                                     PROT_READ|PROT_WRITE,
6115                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6116         if (env->ldt.base == -1)
6117             return -TARGET_ENOMEM;
6118         memset(g2h(env->ldt.base), 0,
6119                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6120         env->ldt.limit = 0xffff;
6121         ldt_table = g2h(env->ldt.base);
6122     }
6123 
6124     /* NOTE: same code as Linux kernel */
6125     /* Allow LDTs to be cleared by the user. */
6126     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6127         if (oldmode ||
6128             (contents == 0		&&
6129              read_exec_only == 1	&&
6130              seg_32bit == 0		&&
6131              limit_in_pages == 0	&&
6132              seg_not_present == 1	&&
6133              useable == 0 )) {
6134             entry_1 = 0;
6135             entry_2 = 0;
6136             goto install;
6137         }
6138     }
6139 
6140     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6141         (ldt_info.limit & 0x0ffff);
6142     entry_2 = (ldt_info.base_addr & 0xff000000) |
6143         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6144         (ldt_info.limit & 0xf0000) |
6145         ((read_exec_only ^ 1) << 9) |
6146         (contents << 10) |
6147         ((seg_not_present ^ 1) << 15) |
6148         (seg_32bit << 22) |
6149         (limit_in_pages << 23) |
6150         (lm << 21) |
6151         0x7000;
6152     if (!oldmode)
6153         entry_2 |= (useable << 20);
6154 
6155     /* Install the new entry ...  */
6156 install:
6157     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6158     lp[0] = tswap32(entry_1);
6159     lp[1] = tswap32(entry_2);
6160     return 0;
6161 }
6162 
6163 /* specific and weird i386 syscalls */
6164 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6165                               unsigned long bytecount)
6166 {
6167     abi_long ret;
6168 
6169     switch (func) {
6170     case 0:
6171         ret = read_ldt(ptr, bytecount);
6172         break;
6173     case 1:
6174         ret = write_ldt(env, ptr, bytecount, 1);
6175         break;
6176     case 0x11:
6177         ret = write_ldt(env, ptr, bytecount, 0);
6178         break;
6179     default:
6180         ret = -TARGET_ENOSYS;
6181         break;
6182     }
6183     return ret;
6184 }
6185 
6186 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6187 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6188 {
6189     uint64_t *gdt_table = g2h(env->gdt.base);
6190     struct target_modify_ldt_ldt_s ldt_info;
6191     struct target_modify_ldt_ldt_s *target_ldt_info;
6192     int seg_32bit, contents, read_exec_only, limit_in_pages;
6193     int seg_not_present, useable, lm;
6194     uint32_t *lp, entry_1, entry_2;
6195     int i;
6196 
6197     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6198     if (!target_ldt_info)
6199         return -TARGET_EFAULT;
6200     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6201     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6202     ldt_info.limit = tswap32(target_ldt_info->limit);
6203     ldt_info.flags = tswap32(target_ldt_info->flags);
6204     if (ldt_info.entry_number == -1) {
6205         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6206             if (gdt_table[i] == 0) {
6207                 ldt_info.entry_number = i;
6208                 target_ldt_info->entry_number = tswap32(i);
6209                 break;
6210             }
6211         }
6212     }
6213     unlock_user_struct(target_ldt_info, ptr, 1);
6214 
6215     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6216         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6217            return -TARGET_EINVAL;
6218     seg_32bit = ldt_info.flags & 1;
6219     contents = (ldt_info.flags >> 1) & 3;
6220     read_exec_only = (ldt_info.flags >> 3) & 1;
6221     limit_in_pages = (ldt_info.flags >> 4) & 1;
6222     seg_not_present = (ldt_info.flags >> 5) & 1;
6223     useable = (ldt_info.flags >> 6) & 1;
6224 #ifdef TARGET_ABI32
6225     lm = 0;
6226 #else
6227     lm = (ldt_info.flags >> 7) & 1;
6228 #endif
6229 
6230     if (contents == 3) {
6231         if (seg_not_present == 0)
6232             return -TARGET_EINVAL;
6233     }
6234 
6235     /* NOTE: same code as Linux kernel */
6236     /* Allow LDTs to be cleared by the user. */
6237     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6238         if ((contents == 0             &&
6239              read_exec_only == 1       &&
6240              seg_32bit == 0            &&
6241              limit_in_pages == 0       &&
6242              seg_not_present == 1      &&
6243              useable == 0 )) {
6244             entry_1 = 0;
6245             entry_2 = 0;
6246             goto install;
6247         }
6248     }
6249 
6250     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6251         (ldt_info.limit & 0x0ffff);
6252     entry_2 = (ldt_info.base_addr & 0xff000000) |
6253         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6254         (ldt_info.limit & 0xf0000) |
6255         ((read_exec_only ^ 1) << 9) |
6256         (contents << 10) |
6257         ((seg_not_present ^ 1) << 15) |
6258         (seg_32bit << 22) |
6259         (limit_in_pages << 23) |
6260         (useable << 20) |
6261         (lm << 21) |
6262         0x7000;
6263 
6264     /* Install the new entry ...  */
6265 install:
6266     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6267     lp[0] = tswap32(entry_1);
6268     lp[1] = tswap32(entry_2);
6269     return 0;
6270 }
6271 
6272 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6273 {
6274     struct target_modify_ldt_ldt_s *target_ldt_info;
6275     uint64_t *gdt_table = g2h(env->gdt.base);
6276     uint32_t base_addr, limit, flags;
6277     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6278     int seg_not_present, useable, lm;
6279     uint32_t *lp, entry_1, entry_2;
6280 
6281     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6282     if (!target_ldt_info)
6283         return -TARGET_EFAULT;
6284     idx = tswap32(target_ldt_info->entry_number);
6285     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6286         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6287         unlock_user_struct(target_ldt_info, ptr, 1);
6288         return -TARGET_EINVAL;
6289     }
6290     lp = (uint32_t *)(gdt_table + idx);
6291     entry_1 = tswap32(lp[0]);
6292     entry_2 = tswap32(lp[1]);
6293 
6294     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6295     contents = (entry_2 >> 10) & 3;
6296     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6297     seg_32bit = (entry_2 >> 22) & 1;
6298     limit_in_pages = (entry_2 >> 23) & 1;
6299     useable = (entry_2 >> 20) & 1;
6300 #ifdef TARGET_ABI32
6301     lm = 0;
6302 #else
6303     lm = (entry_2 >> 21) & 1;
6304 #endif
6305     flags = (seg_32bit << 0) | (contents << 1) |
6306         (read_exec_only << 3) | (limit_in_pages << 4) |
6307         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6308     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6309     base_addr = (entry_1 >> 16) |
6310         (entry_2 & 0xff000000) |
6311         ((entry_2 & 0xff) << 16);
6312     target_ldt_info->base_addr = tswapal(base_addr);
6313     target_ldt_info->limit = tswap32(limit);
6314     target_ldt_info->flags = tswap32(flags);
6315     unlock_user_struct(target_ldt_info, ptr, 1);
6316     return 0;
6317 }
6318 #endif /* TARGET_I386 && TARGET_ABI32 */
6319 
6320 #ifndef TARGET_ABI32
6321 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6322 {
6323     abi_long ret = 0;
6324     abi_ulong val;
6325     int idx;
6326 
6327     switch(code) {
6328     case TARGET_ARCH_SET_GS:
6329     case TARGET_ARCH_SET_FS:
6330         if (code == TARGET_ARCH_SET_GS)
6331             idx = R_GS;
6332         else
6333             idx = R_FS;
6334         cpu_x86_load_seg(env, idx, 0);
6335         env->segs[idx].base = addr;
6336         break;
6337     case TARGET_ARCH_GET_GS:
6338     case TARGET_ARCH_GET_FS:
6339         if (code == TARGET_ARCH_GET_GS)
6340             idx = R_GS;
6341         else
6342             idx = R_FS;
6343         val = env->segs[idx].base;
6344         if (put_user(val, addr, abi_ulong))
6345             ret = -TARGET_EFAULT;
6346         break;
6347     default:
6348         ret = -TARGET_EINVAL;
6349         break;
6350     }
6351     return ret;
6352 }
6353 #endif
6354 
6355 #endif /* defined(TARGET_I386) */
6356 
6357 #define NEW_STACK_SIZE 0x40000
6358 
6359 
6360 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6361 typedef struct {
6362     CPUArchState *env;
6363     pthread_mutex_t mutex;
6364     pthread_cond_t cond;
6365     pthread_t thread;
6366     uint32_t tid;
6367     abi_ulong child_tidptr;
6368     abi_ulong parent_tidptr;
6369     sigset_t sigmask;
6370 } new_thread_info;
6371 
6372 static void *clone_func(void *arg)
6373 {
6374     new_thread_info *info = arg;
6375     CPUArchState *env;
6376     CPUState *cpu;
6377     TaskState *ts;
6378 
6379     rcu_register_thread();
6380     tcg_register_thread();
6381     env = info->env;
6382     cpu = ENV_GET_CPU(env);
6383     thread_cpu = cpu;
6384     ts = (TaskState *)cpu->opaque;
6385     info->tid = gettid();
6386     task_settid(ts);
6387     if (info->child_tidptr)
6388         put_user_u32(info->tid, info->child_tidptr);
6389     if (info->parent_tidptr)
6390         put_user_u32(info->tid, info->parent_tidptr);
6391     /* Enable signals.  */
6392     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6393     /* Signal to the parent that we're ready.  */
6394     pthread_mutex_lock(&info->mutex);
6395     pthread_cond_broadcast(&info->cond);
6396     pthread_mutex_unlock(&info->mutex);
6397     /* Wait until the parent has finished initializing the tls state.  */
6398     pthread_mutex_lock(&clone_lock);
6399     pthread_mutex_unlock(&clone_lock);
6400     cpu_loop(env);
6401     /* never exits */
6402     return NULL;
6403 }
6404 
6405 /* do_fork() Must return host values and target errnos (unlike most
6406    do_*() functions). */
6407 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6408                    abi_ulong parent_tidptr, target_ulong newtls,
6409                    abi_ulong child_tidptr)
6410 {
6411     CPUState *cpu = ENV_GET_CPU(env);
6412     int ret;
6413     TaskState *ts;
6414     CPUState *new_cpu;
6415     CPUArchState *new_env;
6416     sigset_t sigmask;
6417 
6418     flags &= ~CLONE_IGNORED_FLAGS;
6419 
6420     /* Emulate vfork() with fork() */
6421     if (flags & CLONE_VFORK)
6422         flags &= ~(CLONE_VFORK | CLONE_VM);
6423 
6424     if (flags & CLONE_VM) {
6425         TaskState *parent_ts = (TaskState *)cpu->opaque;
6426         new_thread_info info;
6427         pthread_attr_t attr;
6428 
6429         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6430             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6431             return -TARGET_EINVAL;
6432         }
6433 
6434         ts = g_new0(TaskState, 1);
6435         init_task_state(ts);
6436 
6437         /* Grab a mutex so that thread setup appears atomic.  */
6438         pthread_mutex_lock(&clone_lock);
6439 
6440         /* we create a new CPU instance. */
6441         new_env = cpu_copy(env);
6442         /* Init regs that differ from the parent.  */
6443         cpu_clone_regs(new_env, newsp);
6444         new_cpu = ENV_GET_CPU(new_env);
6445         new_cpu->opaque = ts;
6446         ts->bprm = parent_ts->bprm;
6447         ts->info = parent_ts->info;
6448         ts->signal_mask = parent_ts->signal_mask;
6449 
6450         if (flags & CLONE_CHILD_CLEARTID) {
6451             ts->child_tidptr = child_tidptr;
6452         }
6453 
6454         if (flags & CLONE_SETTLS) {
6455             cpu_set_tls (new_env, newtls);
6456         }
6457 
6458         memset(&info, 0, sizeof(info));
6459         pthread_mutex_init(&info.mutex, NULL);
6460         pthread_mutex_lock(&info.mutex);
6461         pthread_cond_init(&info.cond, NULL);
6462         info.env = new_env;
6463         if (flags & CLONE_CHILD_SETTID) {
6464             info.child_tidptr = child_tidptr;
6465         }
6466         if (flags & CLONE_PARENT_SETTID) {
6467             info.parent_tidptr = parent_tidptr;
6468         }
6469 
6470         ret = pthread_attr_init(&attr);
6471         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6472         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6473         /* It is not safe to deliver signals until the child has finished
6474            initializing, so temporarily block all signals.  */
6475         sigfillset(&sigmask);
6476         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6477 
6478         /* If this is our first additional thread, we need to ensure we
6479          * generate code for parallel execution and flush old translations.
6480          */
6481         if (!parallel_cpus) {
6482             parallel_cpus = true;
6483             tb_flush(cpu);
6484         }
6485 
6486         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6487         /* TODO: Free new CPU state if thread creation failed.  */
6488 
6489         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6490         pthread_attr_destroy(&attr);
6491         if (ret == 0) {
6492             /* Wait for the child to initialize.  */
6493             pthread_cond_wait(&info.cond, &info.mutex);
6494             ret = info.tid;
6495         } else {
6496             ret = -1;
6497         }
6498         pthread_mutex_unlock(&info.mutex);
6499         pthread_cond_destroy(&info.cond);
6500         pthread_mutex_destroy(&info.mutex);
6501         pthread_mutex_unlock(&clone_lock);
6502     } else {
6503         /* if no CLONE_VM, we consider it is a fork */
6504         if (flags & CLONE_INVALID_FORK_FLAGS) {
6505             return -TARGET_EINVAL;
6506         }
6507 
6508         /* We can't support custom termination signals */
6509         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6510             return -TARGET_EINVAL;
6511         }
6512 
6513         if (block_signals()) {
6514             return -TARGET_ERESTARTSYS;
6515         }
6516 
6517         fork_start();
6518         ret = fork();
6519         if (ret == 0) {
6520             /* Child Process.  */
6521             cpu_clone_regs(env, newsp);
6522             fork_end(1);
6523             /* There is a race condition here.  The parent process could
6524                theoretically read the TID in the child process before the child
6525                tid is set.  This would require using either ptrace
6526                (not implemented) or having *_tidptr to point at a shared memory
6527                mapping.  We can't repeat the spinlock hack used above because
6528                the child process gets its own copy of the lock.  */
6529             if (flags & CLONE_CHILD_SETTID)
6530                 put_user_u32(gettid(), child_tidptr);
6531             if (flags & CLONE_PARENT_SETTID)
6532                 put_user_u32(gettid(), parent_tidptr);
6533             ts = (TaskState *)cpu->opaque;
6534             if (flags & CLONE_SETTLS)
6535                 cpu_set_tls (env, newtls);
6536             if (flags & CLONE_CHILD_CLEARTID)
6537                 ts->child_tidptr = child_tidptr;
6538         } else {
6539             fork_end(0);
6540         }
6541     }
6542     return ret;
6543 }
6544 
6545 /* warning : doesn't handle linux specific flags... */
6546 static int target_to_host_fcntl_cmd(int cmd)
6547 {
6548     switch(cmd) {
6549 	case TARGET_F_DUPFD:
6550 	case TARGET_F_GETFD:
6551 	case TARGET_F_SETFD:
6552 	case TARGET_F_GETFL:
6553 	case TARGET_F_SETFL:
6554             return cmd;
6555         case TARGET_F_GETLK:
6556             return F_GETLK64;
6557         case TARGET_F_SETLK:
6558             return F_SETLK64;
6559         case TARGET_F_SETLKW:
6560             return F_SETLKW64;
6561 	case TARGET_F_GETOWN:
6562 	    return F_GETOWN;
6563 	case TARGET_F_SETOWN:
6564 	    return F_SETOWN;
6565 	case TARGET_F_GETSIG:
6566 	    return F_GETSIG;
6567 	case TARGET_F_SETSIG:
6568 	    return F_SETSIG;
6569 #if TARGET_ABI_BITS == 32
6570         case TARGET_F_GETLK64:
6571 	    return F_GETLK64;
6572 	case TARGET_F_SETLK64:
6573 	    return F_SETLK64;
6574 	case TARGET_F_SETLKW64:
6575 	    return F_SETLKW64;
6576 #endif
6577         case TARGET_F_SETLEASE:
6578             return F_SETLEASE;
6579         case TARGET_F_GETLEASE:
6580             return F_GETLEASE;
6581 #ifdef F_DUPFD_CLOEXEC
6582         case TARGET_F_DUPFD_CLOEXEC:
6583             return F_DUPFD_CLOEXEC;
6584 #endif
6585         case TARGET_F_NOTIFY:
6586             return F_NOTIFY;
6587 #ifdef F_GETOWN_EX
6588 	case TARGET_F_GETOWN_EX:
6589 	    return F_GETOWN_EX;
6590 #endif
6591 #ifdef F_SETOWN_EX
6592 	case TARGET_F_SETOWN_EX:
6593 	    return F_SETOWN_EX;
6594 #endif
6595 #ifdef F_SETPIPE_SZ
6596         case TARGET_F_SETPIPE_SZ:
6597             return F_SETPIPE_SZ;
6598         case TARGET_F_GETPIPE_SZ:
6599             return F_GETPIPE_SZ;
6600 #endif
6601 	default:
6602             return -TARGET_EINVAL;
6603     }
6604     return -TARGET_EINVAL;
6605 }
6606 
6607 #define FLOCK_TRANSTBL \
6608     switch (type) { \
6609     TRANSTBL_CONVERT(F_RDLCK); \
6610     TRANSTBL_CONVERT(F_WRLCK); \
6611     TRANSTBL_CONVERT(F_UNLCK); \
6612     TRANSTBL_CONVERT(F_EXLCK); \
6613     TRANSTBL_CONVERT(F_SHLCK); \
6614     }
6615 
6616 static int target_to_host_flock(int type)
6617 {
6618 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6619     FLOCK_TRANSTBL
6620 #undef  TRANSTBL_CONVERT
6621     return -TARGET_EINVAL;
6622 }
6623 
6624 static int host_to_target_flock(int type)
6625 {
6626 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6627     FLOCK_TRANSTBL
6628 #undef  TRANSTBL_CONVERT
6629     /* if we don't know how to convert the value coming
6630      * from the host we copy to the target field as-is
6631      */
6632     return type;
6633 }
6634 
6635 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6636                                             abi_ulong target_flock_addr)
6637 {
6638     struct target_flock *target_fl;
6639     int l_type;
6640 
6641     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6642         return -TARGET_EFAULT;
6643     }
6644 
6645     __get_user(l_type, &target_fl->l_type);
6646     l_type = target_to_host_flock(l_type);
6647     if (l_type < 0) {
6648         return l_type;
6649     }
6650     fl->l_type = l_type;
6651     __get_user(fl->l_whence, &target_fl->l_whence);
6652     __get_user(fl->l_start, &target_fl->l_start);
6653     __get_user(fl->l_len, &target_fl->l_len);
6654     __get_user(fl->l_pid, &target_fl->l_pid);
6655     unlock_user_struct(target_fl, target_flock_addr, 0);
6656     return 0;
6657 }
6658 
6659 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6660                                           const struct flock64 *fl)
6661 {
6662     struct target_flock *target_fl;
6663     short l_type;
6664 
6665     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6666         return -TARGET_EFAULT;
6667     }
6668 
6669     l_type = host_to_target_flock(fl->l_type);
6670     __put_user(l_type, &target_fl->l_type);
6671     __put_user(fl->l_whence, &target_fl->l_whence);
6672     __put_user(fl->l_start, &target_fl->l_start);
6673     __put_user(fl->l_len, &target_fl->l_len);
6674     __put_user(fl->l_pid, &target_fl->l_pid);
6675     unlock_user_struct(target_fl, target_flock_addr, 1);
6676     return 0;
6677 }
6678 
6679 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6680 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6681 
6682 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6683 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6684                                                    abi_ulong target_flock_addr)
6685 {
6686     struct target_oabi_flock64 *target_fl;
6687     int l_type;
6688 
6689     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6690         return -TARGET_EFAULT;
6691     }
6692 
6693     __get_user(l_type, &target_fl->l_type);
6694     l_type = target_to_host_flock(l_type);
6695     if (l_type < 0) {
6696         return l_type;
6697     }
6698     fl->l_type = l_type;
6699     __get_user(fl->l_whence, &target_fl->l_whence);
6700     __get_user(fl->l_start, &target_fl->l_start);
6701     __get_user(fl->l_len, &target_fl->l_len);
6702     __get_user(fl->l_pid, &target_fl->l_pid);
6703     unlock_user_struct(target_fl, target_flock_addr, 0);
6704     return 0;
6705 }
6706 
6707 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6708                                                  const struct flock64 *fl)
6709 {
6710     struct target_oabi_flock64 *target_fl;
6711     short l_type;
6712 
6713     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6714         return -TARGET_EFAULT;
6715     }
6716 
6717     l_type = host_to_target_flock(fl->l_type);
6718     __put_user(l_type, &target_fl->l_type);
6719     __put_user(fl->l_whence, &target_fl->l_whence);
6720     __put_user(fl->l_start, &target_fl->l_start);
6721     __put_user(fl->l_len, &target_fl->l_len);
6722     __put_user(fl->l_pid, &target_fl->l_pid);
6723     unlock_user_struct(target_fl, target_flock_addr, 1);
6724     return 0;
6725 }
6726 #endif
6727 
6728 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6729                                               abi_ulong target_flock_addr)
6730 {
6731     struct target_flock64 *target_fl;
6732     int l_type;
6733 
6734     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6735         return -TARGET_EFAULT;
6736     }
6737 
6738     __get_user(l_type, &target_fl->l_type);
6739     l_type = target_to_host_flock(l_type);
6740     if (l_type < 0) {
6741         return l_type;
6742     }
6743     fl->l_type = l_type;
6744     __get_user(fl->l_whence, &target_fl->l_whence);
6745     __get_user(fl->l_start, &target_fl->l_start);
6746     __get_user(fl->l_len, &target_fl->l_len);
6747     __get_user(fl->l_pid, &target_fl->l_pid);
6748     unlock_user_struct(target_fl, target_flock_addr, 0);
6749     return 0;
6750 }
6751 
6752 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6753                                             const struct flock64 *fl)
6754 {
6755     struct target_flock64 *target_fl;
6756     short l_type;
6757 
6758     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6759         return -TARGET_EFAULT;
6760     }
6761 
6762     l_type = host_to_target_flock(fl->l_type);
6763     __put_user(l_type, &target_fl->l_type);
6764     __put_user(fl->l_whence, &target_fl->l_whence);
6765     __put_user(fl->l_start, &target_fl->l_start);
6766     __put_user(fl->l_len, &target_fl->l_len);
6767     __put_user(fl->l_pid, &target_fl->l_pid);
6768     unlock_user_struct(target_fl, target_flock_addr, 1);
6769     return 0;
6770 }
6771 
6772 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6773 {
6774     struct flock64 fl64;
6775 #ifdef F_GETOWN_EX
6776     struct f_owner_ex fox;
6777     struct target_f_owner_ex *target_fox;
6778 #endif
6779     abi_long ret;
6780     int host_cmd = target_to_host_fcntl_cmd(cmd);
6781 
6782     if (host_cmd == -TARGET_EINVAL)
6783 	    return host_cmd;
6784 
6785     switch(cmd) {
6786     case TARGET_F_GETLK:
6787         ret = copy_from_user_flock(&fl64, arg);
6788         if (ret) {
6789             return ret;
6790         }
6791         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6792         if (ret == 0) {
6793             ret = copy_to_user_flock(arg, &fl64);
6794         }
6795         break;
6796 
6797     case TARGET_F_SETLK:
6798     case TARGET_F_SETLKW:
6799         ret = copy_from_user_flock(&fl64, arg);
6800         if (ret) {
6801             return ret;
6802         }
6803         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6804         break;
6805 
6806     case TARGET_F_GETLK64:
6807         ret = copy_from_user_flock64(&fl64, arg);
6808         if (ret) {
6809             return ret;
6810         }
6811         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6812         if (ret == 0) {
6813             ret = copy_to_user_flock64(arg, &fl64);
6814         }
6815         break;
6816     case TARGET_F_SETLK64:
6817     case TARGET_F_SETLKW64:
6818         ret = copy_from_user_flock64(&fl64, arg);
6819         if (ret) {
6820             return ret;
6821         }
6822         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6823         break;
6824 
6825     case TARGET_F_GETFL:
6826         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6827         if (ret >= 0) {
6828             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6829         }
6830         break;
6831 
6832     case TARGET_F_SETFL:
6833         ret = get_errno(safe_fcntl(fd, host_cmd,
6834                                    target_to_host_bitmask(arg,
6835                                                           fcntl_flags_tbl)));
6836         break;
6837 
6838 #ifdef F_GETOWN_EX
6839     case TARGET_F_GETOWN_EX:
6840         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6841         if (ret >= 0) {
6842             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6843                 return -TARGET_EFAULT;
6844             target_fox->type = tswap32(fox.type);
6845             target_fox->pid = tswap32(fox.pid);
6846             unlock_user_struct(target_fox, arg, 1);
6847         }
6848         break;
6849 #endif
6850 
6851 #ifdef F_SETOWN_EX
6852     case TARGET_F_SETOWN_EX:
6853         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6854             return -TARGET_EFAULT;
6855         fox.type = tswap32(target_fox->type);
6856         fox.pid = tswap32(target_fox->pid);
6857         unlock_user_struct(target_fox, arg, 0);
6858         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6859         break;
6860 #endif
6861 
6862     case TARGET_F_SETOWN:
6863     case TARGET_F_GETOWN:
6864     case TARGET_F_SETSIG:
6865     case TARGET_F_GETSIG:
6866     case TARGET_F_SETLEASE:
6867     case TARGET_F_GETLEASE:
6868     case TARGET_F_SETPIPE_SZ:
6869     case TARGET_F_GETPIPE_SZ:
6870         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6871         break;
6872 
6873     default:
6874         ret = get_errno(safe_fcntl(fd, cmd, arg));
6875         break;
6876     }
6877     return ret;
6878 }
6879 
6880 #ifdef USE_UID16
6881 
6882 static inline int high2lowuid(int uid)
6883 {
6884     if (uid > 65535)
6885         return 65534;
6886     else
6887         return uid;
6888 }
6889 
6890 static inline int high2lowgid(int gid)
6891 {
6892     if (gid > 65535)
6893         return 65534;
6894     else
6895         return gid;
6896 }
6897 
6898 static inline int low2highuid(int uid)
6899 {
6900     if ((int16_t)uid == -1)
6901         return -1;
6902     else
6903         return uid;
6904 }
6905 
6906 static inline int low2highgid(int gid)
6907 {
6908     if ((int16_t)gid == -1)
6909         return -1;
6910     else
6911         return gid;
6912 }
6913 static inline int tswapid(int id)
6914 {
6915     return tswap16(id);
6916 }
6917 
6918 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6919 
6920 #else /* !USE_UID16 */
6921 static inline int high2lowuid(int uid)
6922 {
6923     return uid;
6924 }
6925 static inline int high2lowgid(int gid)
6926 {
6927     return gid;
6928 }
6929 static inline int low2highuid(int uid)
6930 {
6931     return uid;
6932 }
6933 static inline int low2highgid(int gid)
6934 {
6935     return gid;
6936 }
6937 static inline int tswapid(int id)
6938 {
6939     return tswap32(id);
6940 }
6941 
6942 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6943 
6944 #endif /* USE_UID16 */
6945 
6946 /* We must do direct syscalls for setting UID/GID, because we want to
6947  * implement the Linux system call semantics of "change only for this thread",
6948  * not the libc/POSIX semantics of "change for all threads in process".
6949  * (See http://ewontfix.com/17/ for more details.)
6950  * We use the 32-bit version of the syscalls if present; if it is not
6951  * then either the host architecture supports 32-bit UIDs natively with
6952  * the standard syscall, or the 16-bit UID is the best we can do.
6953  */
6954 #ifdef __NR_setuid32
6955 #define __NR_sys_setuid __NR_setuid32
6956 #else
6957 #define __NR_sys_setuid __NR_setuid
6958 #endif
6959 #ifdef __NR_setgid32
6960 #define __NR_sys_setgid __NR_setgid32
6961 #else
6962 #define __NR_sys_setgid __NR_setgid
6963 #endif
6964 #ifdef __NR_setresuid32
6965 #define __NR_sys_setresuid __NR_setresuid32
6966 #else
6967 #define __NR_sys_setresuid __NR_setresuid
6968 #endif
6969 #ifdef __NR_setresgid32
6970 #define __NR_sys_setresgid __NR_setresgid32
6971 #else
6972 #define __NR_sys_setresgid __NR_setresgid
6973 #endif
6974 
6975 _syscall1(int, sys_setuid, uid_t, uid)
6976 _syscall1(int, sys_setgid, gid_t, gid)
6977 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6978 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6979 
6980 void syscall_init(void)
6981 {
6982     IOCTLEntry *ie;
6983     const argtype *arg_type;
6984     int size;
6985     int i;
6986 
6987     thunk_init(STRUCT_MAX);
6988 
6989 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6990 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6991 #include "syscall_types.h"
6992 #undef STRUCT
6993 #undef STRUCT_SPECIAL
6994 
6995     /* Build target_to_host_errno_table[] table from
6996      * host_to_target_errno_table[]. */
6997     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6998         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6999     }
7000 
7001     /* we patch the ioctl size if necessary. We rely on the fact that
7002        no ioctl has all the bits at '1' in the size field */
7003     ie = ioctl_entries;
7004     while (ie->target_cmd != 0) {
7005         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7006             TARGET_IOC_SIZEMASK) {
7007             arg_type = ie->arg_type;
7008             if (arg_type[0] != TYPE_PTR) {
7009                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7010                         ie->target_cmd);
7011                 exit(1);
7012             }
7013             arg_type++;
7014             size = thunk_type_size(arg_type, 0);
7015             ie->target_cmd = (ie->target_cmd &
7016                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7017                 (size << TARGET_IOC_SIZESHIFT);
7018         }
7019 
7020         /* automatic consistency check if same arch */
7021 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7022     (defined(__x86_64__) && defined(TARGET_X86_64))
7023         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7024             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7025                     ie->name, ie->target_cmd, ie->host_cmd);
7026         }
7027 #endif
7028         ie++;
7029     }
7030 }
7031 
7032 #if TARGET_ABI_BITS == 32
7033 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
7034 {
7035 #ifdef TARGET_WORDS_BIGENDIAN
7036     return ((uint64_t)word0 << 32) | word1;
7037 #else
7038     return ((uint64_t)word1 << 32) | word0;
7039 #endif
7040 }
7041 #else /* TARGET_ABI_BITS == 32 */
7042 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
7043 {
7044     return word0;
7045 }
7046 #endif /* TARGET_ABI_BITS != 32 */
7047 
7048 #ifdef TARGET_NR_truncate64
7049 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7050                                          abi_long arg2,
7051                                          abi_long arg3,
7052                                          abi_long arg4)
7053 {
7054     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7055         arg2 = arg3;
7056         arg3 = arg4;
7057     }
7058     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7059 }
7060 #endif
7061 
7062 #ifdef TARGET_NR_ftruncate64
7063 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7064                                           abi_long arg2,
7065                                           abi_long arg3,
7066                                           abi_long arg4)
7067 {
7068     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7069         arg2 = arg3;
7070         arg3 = arg4;
7071     }
7072     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7073 }
7074 #endif
7075 
7076 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7077                                                abi_ulong target_addr)
7078 {
7079     struct target_timespec *target_ts;
7080 
7081     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7082         return -TARGET_EFAULT;
7083     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7084     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7085     unlock_user_struct(target_ts, target_addr, 0);
7086     return 0;
7087 }
7088 
7089 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7090                                                struct timespec *host_ts)
7091 {
7092     struct target_timespec *target_ts;
7093 
7094     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7095         return -TARGET_EFAULT;
7096     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7097     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7098     unlock_user_struct(target_ts, target_addr, 1);
7099     return 0;
7100 }
7101 
7102 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7103                                                  abi_ulong target_addr)
7104 {
7105     struct target_itimerspec *target_itspec;
7106 
7107     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7108         return -TARGET_EFAULT;
7109     }
7110 
7111     host_itspec->it_interval.tv_sec =
7112                             tswapal(target_itspec->it_interval.tv_sec);
7113     host_itspec->it_interval.tv_nsec =
7114                             tswapal(target_itspec->it_interval.tv_nsec);
7115     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7116     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7117 
7118     unlock_user_struct(target_itspec, target_addr, 1);
7119     return 0;
7120 }
7121 
7122 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7123                                                struct itimerspec *host_its)
7124 {
7125     struct target_itimerspec *target_itspec;
7126 
7127     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7128         return -TARGET_EFAULT;
7129     }
7130 
7131     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7132     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7133 
7134     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7135     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7136 
7137     unlock_user_struct(target_itspec, target_addr, 0);
7138     return 0;
7139 }
7140 
7141 static inline abi_long target_to_host_timex(struct timex *host_tx,
7142                                             abi_long target_addr)
7143 {
7144     struct target_timex *target_tx;
7145 
7146     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7147         return -TARGET_EFAULT;
7148     }
7149 
7150     __get_user(host_tx->modes, &target_tx->modes);
7151     __get_user(host_tx->offset, &target_tx->offset);
7152     __get_user(host_tx->freq, &target_tx->freq);
7153     __get_user(host_tx->maxerror, &target_tx->maxerror);
7154     __get_user(host_tx->esterror, &target_tx->esterror);
7155     __get_user(host_tx->status, &target_tx->status);
7156     __get_user(host_tx->constant, &target_tx->constant);
7157     __get_user(host_tx->precision, &target_tx->precision);
7158     __get_user(host_tx->tolerance, &target_tx->tolerance);
7159     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7160     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7161     __get_user(host_tx->tick, &target_tx->tick);
7162     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7163     __get_user(host_tx->jitter, &target_tx->jitter);
7164     __get_user(host_tx->shift, &target_tx->shift);
7165     __get_user(host_tx->stabil, &target_tx->stabil);
7166     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7167     __get_user(host_tx->calcnt, &target_tx->calcnt);
7168     __get_user(host_tx->errcnt, &target_tx->errcnt);
7169     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7170     __get_user(host_tx->tai, &target_tx->tai);
7171 
7172     unlock_user_struct(target_tx, target_addr, 0);
7173     return 0;
7174 }
7175 
7176 static inline abi_long host_to_target_timex(abi_long target_addr,
7177                                             struct timex *host_tx)
7178 {
7179     struct target_timex *target_tx;
7180 
7181     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7182         return -TARGET_EFAULT;
7183     }
7184 
7185     __put_user(host_tx->modes, &target_tx->modes);
7186     __put_user(host_tx->offset, &target_tx->offset);
7187     __put_user(host_tx->freq, &target_tx->freq);
7188     __put_user(host_tx->maxerror, &target_tx->maxerror);
7189     __put_user(host_tx->esterror, &target_tx->esterror);
7190     __put_user(host_tx->status, &target_tx->status);
7191     __put_user(host_tx->constant, &target_tx->constant);
7192     __put_user(host_tx->precision, &target_tx->precision);
7193     __put_user(host_tx->tolerance, &target_tx->tolerance);
7194     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7195     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7196     __put_user(host_tx->tick, &target_tx->tick);
7197     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7198     __put_user(host_tx->jitter, &target_tx->jitter);
7199     __put_user(host_tx->shift, &target_tx->shift);
7200     __put_user(host_tx->stabil, &target_tx->stabil);
7201     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7202     __put_user(host_tx->calcnt, &target_tx->calcnt);
7203     __put_user(host_tx->errcnt, &target_tx->errcnt);
7204     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7205     __put_user(host_tx->tai, &target_tx->tai);
7206 
7207     unlock_user_struct(target_tx, target_addr, 1);
7208     return 0;
7209 }
7210 
7211 
7212 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7213                                                abi_ulong target_addr)
7214 {
7215     struct target_sigevent *target_sevp;
7216 
7217     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7218         return -TARGET_EFAULT;
7219     }
7220 
7221     /* This union is awkward on 64 bit systems because it has a 32 bit
7222      * integer and a pointer in it; we follow the conversion approach
7223      * used for handling sigval types in signal.c so the guest should get
7224      * the correct value back even if we did a 64 bit byteswap and it's
7225      * using the 32 bit integer.
7226      */
7227     host_sevp->sigev_value.sival_ptr =
7228         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7229     host_sevp->sigev_signo =
7230         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7231     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7232     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7233 
7234     unlock_user_struct(target_sevp, target_addr, 1);
7235     return 0;
7236 }
7237 
7238 #if defined(TARGET_NR_mlockall)
7239 static inline int target_to_host_mlockall_arg(int arg)
7240 {
7241     int result = 0;
7242 
7243     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7244         result |= MCL_CURRENT;
7245     }
7246     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7247         result |= MCL_FUTURE;
7248     }
7249     return result;
7250 }
7251 #endif
7252 
7253 static inline abi_long host_to_target_stat64(void *cpu_env,
7254                                              abi_ulong target_addr,
7255                                              struct stat *host_st)
7256 {
7257 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7258     if (((CPUARMState *)cpu_env)->eabi) {
7259         struct target_eabi_stat64 *target_st;
7260 
7261         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7262             return -TARGET_EFAULT;
7263         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7264         __put_user(host_st->st_dev, &target_st->st_dev);
7265         __put_user(host_st->st_ino, &target_st->st_ino);
7266 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7267         __put_user(host_st->st_ino, &target_st->__st_ino);
7268 #endif
7269         __put_user(host_st->st_mode, &target_st->st_mode);
7270         __put_user(host_st->st_nlink, &target_st->st_nlink);
7271         __put_user(host_st->st_uid, &target_st->st_uid);
7272         __put_user(host_st->st_gid, &target_st->st_gid);
7273         __put_user(host_st->st_rdev, &target_st->st_rdev);
7274         __put_user(host_st->st_size, &target_st->st_size);
7275         __put_user(host_st->st_blksize, &target_st->st_blksize);
7276         __put_user(host_st->st_blocks, &target_st->st_blocks);
7277         __put_user(host_st->st_atime, &target_st->target_st_atime);
7278         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7279         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7280         unlock_user_struct(target_st, target_addr, 1);
7281     } else
7282 #endif
7283     {
7284 #if defined(TARGET_HAS_STRUCT_STAT64)
7285         struct target_stat64 *target_st;
7286 #else
7287         struct target_stat *target_st;
7288 #endif
7289 
7290         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7291             return -TARGET_EFAULT;
7292         memset(target_st, 0, sizeof(*target_st));
7293         __put_user(host_st->st_dev, &target_st->st_dev);
7294         __put_user(host_st->st_ino, &target_st->st_ino);
7295 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7296         __put_user(host_st->st_ino, &target_st->__st_ino);
7297 #endif
7298         __put_user(host_st->st_mode, &target_st->st_mode);
7299         __put_user(host_st->st_nlink, &target_st->st_nlink);
7300         __put_user(host_st->st_uid, &target_st->st_uid);
7301         __put_user(host_st->st_gid, &target_st->st_gid);
7302         __put_user(host_st->st_rdev, &target_st->st_rdev);
7303         /* XXX: better use of kernel struct */
7304         __put_user(host_st->st_size, &target_st->st_size);
7305         __put_user(host_st->st_blksize, &target_st->st_blksize);
7306         __put_user(host_st->st_blocks, &target_st->st_blocks);
7307         __put_user(host_st->st_atime, &target_st->target_st_atime);
7308         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7309         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7310         unlock_user_struct(target_st, target_addr, 1);
7311     }
7312 
7313     return 0;
7314 }
7315 
7316 /* ??? Using host futex calls even when target atomic operations
7317    are not really atomic probably breaks things.  However implementing
7318    futexes locally would make futexes shared between multiple processes
7319    tricky.  However they're probably useless because guest atomic
7320    operations won't work either.  */
7321 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7322                     target_ulong uaddr2, int val3)
7323 {
7324     struct timespec ts, *pts;
7325     int base_op;
7326 
7327     /* ??? We assume FUTEX_* constants are the same on both host
7328        and target.  */
7329 #ifdef FUTEX_CMD_MASK
7330     base_op = op & FUTEX_CMD_MASK;
7331 #else
7332     base_op = op;
7333 #endif
7334     switch (base_op) {
7335     case FUTEX_WAIT:
7336     case FUTEX_WAIT_BITSET:
7337         if (timeout) {
7338             pts = &ts;
7339             target_to_host_timespec(pts, timeout);
7340         } else {
7341             pts = NULL;
7342         }
7343         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7344                          pts, NULL, val3));
7345     case FUTEX_WAKE:
7346         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7347     case FUTEX_FD:
7348         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7349     case FUTEX_REQUEUE:
7350     case FUTEX_CMP_REQUEUE:
7351     case FUTEX_WAKE_OP:
7352         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7353            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7354            But the prototype takes a `struct timespec *'; insert casts
7355            to satisfy the compiler.  We do not need to tswap TIMEOUT
7356            since it's not compared to guest memory.  */
7357         pts = (struct timespec *)(uintptr_t) timeout;
7358         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7359                                     g2h(uaddr2),
7360                                     (base_op == FUTEX_CMP_REQUEUE
7361                                      ? tswap32(val3)
7362                                      : val3)));
7363     default:
7364         return -TARGET_ENOSYS;
7365     }
7366 }
7367 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7368 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7369                                      abi_long handle, abi_long mount_id,
7370                                      abi_long flags)
7371 {
7372     struct file_handle *target_fh;
7373     struct file_handle *fh;
7374     int mid = 0;
7375     abi_long ret;
7376     char *name;
7377     unsigned int size, total_size;
7378 
7379     if (get_user_s32(size, handle)) {
7380         return -TARGET_EFAULT;
7381     }
7382 
7383     name = lock_user_string(pathname);
7384     if (!name) {
7385         return -TARGET_EFAULT;
7386     }
7387 
7388     total_size = sizeof(struct file_handle) + size;
7389     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7390     if (!target_fh) {
7391         unlock_user(name, pathname, 0);
7392         return -TARGET_EFAULT;
7393     }
7394 
7395     fh = g_malloc0(total_size);
7396     fh->handle_bytes = size;
7397 
7398     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7399     unlock_user(name, pathname, 0);
7400 
7401     /* man name_to_handle_at(2):
7402      * Other than the use of the handle_bytes field, the caller should treat
7403      * the file_handle structure as an opaque data type
7404      */
7405 
7406     memcpy(target_fh, fh, total_size);
7407     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7408     target_fh->handle_type = tswap32(fh->handle_type);
7409     g_free(fh);
7410     unlock_user(target_fh, handle, total_size);
7411 
7412     if (put_user_s32(mid, mount_id)) {
7413         return -TARGET_EFAULT;
7414     }
7415 
7416     return ret;
7417 
7418 }
7419 #endif
7420 
7421 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7422 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7423                                      abi_long flags)
7424 {
7425     struct file_handle *target_fh;
7426     struct file_handle *fh;
7427     unsigned int size, total_size;
7428     abi_long ret;
7429 
7430     if (get_user_s32(size, handle)) {
7431         return -TARGET_EFAULT;
7432     }
7433 
7434     total_size = sizeof(struct file_handle) + size;
7435     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7436     if (!target_fh) {
7437         return -TARGET_EFAULT;
7438     }
7439 
7440     fh = g_memdup(target_fh, total_size);
7441     fh->handle_bytes = size;
7442     fh->handle_type = tswap32(target_fh->handle_type);
7443 
7444     ret = get_errno(open_by_handle_at(mount_fd, fh,
7445                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7446 
7447     g_free(fh);
7448 
7449     unlock_user(target_fh, handle, total_size);
7450 
7451     return ret;
7452 }
7453 #endif
7454 
7455 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7456 
7457 /* signalfd siginfo conversion */
7458 
7459 static void
7460 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7461                                 const struct signalfd_siginfo *info)
7462 {
7463     int sig = host_to_target_signal(info->ssi_signo);
7464 
7465     /* linux/signalfd.h defines a ssi_addr_lsb
7466      * not defined in sys/signalfd.h but used by some kernels
7467      */
7468 
7469 #ifdef BUS_MCEERR_AO
7470     if (tinfo->ssi_signo == SIGBUS &&
7471         (tinfo->ssi_code == BUS_MCEERR_AR ||
7472          tinfo->ssi_code == BUS_MCEERR_AO)) {
7473         uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7474         uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7475         *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7476     }
7477 #endif
7478 
7479     tinfo->ssi_signo = tswap32(sig);
7480     tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7481     tinfo->ssi_code = tswap32(info->ssi_code);
7482     tinfo->ssi_pid = tswap32(info->ssi_pid);
7483     tinfo->ssi_uid = tswap32(info->ssi_uid);
7484     tinfo->ssi_fd = tswap32(info->ssi_fd);
7485     tinfo->ssi_tid = tswap32(info->ssi_tid);
7486     tinfo->ssi_band = tswap32(info->ssi_band);
7487     tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7488     tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7489     tinfo->ssi_status = tswap32(info->ssi_status);
7490     tinfo->ssi_int = tswap32(info->ssi_int);
7491     tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7492     tinfo->ssi_utime = tswap64(info->ssi_utime);
7493     tinfo->ssi_stime = tswap64(info->ssi_stime);
7494     tinfo->ssi_addr = tswap64(info->ssi_addr);
7495 }
7496 
7497 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7498 {
7499     int i;
7500 
7501     for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7502         host_to_target_signalfd_siginfo(buf + i, buf + i);
7503     }
7504 
7505     return len;
7506 }
7507 
7508 static TargetFdTrans target_signalfd_trans = {
7509     .host_to_target_data = host_to_target_data_signalfd,
7510 };
7511 
7512 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7513 {
7514     int host_flags;
7515     target_sigset_t *target_mask;
7516     sigset_t host_mask;
7517     abi_long ret;
7518 
7519     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7520         return -TARGET_EINVAL;
7521     }
7522     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7523         return -TARGET_EFAULT;
7524     }
7525 
7526     target_to_host_sigset(&host_mask, target_mask);
7527 
7528     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7529 
7530     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7531     if (ret >= 0) {
7532         fd_trans_register(ret, &target_signalfd_trans);
7533     }
7534 
7535     unlock_user_struct(target_mask, mask, 0);
7536 
7537     return ret;
7538 }
7539 #endif
7540 
7541 /* Map host to target signal numbers for the wait family of syscalls.
7542    Assume all other status bits are the same.  */
7543 int host_to_target_waitstatus(int status)
7544 {
7545     if (WIFSIGNALED(status)) {
7546         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7547     }
7548     if (WIFSTOPPED(status)) {
7549         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7550                | (status & 0xff);
7551     }
7552     return status;
7553 }
7554 
7555 static int open_self_cmdline(void *cpu_env, int fd)
7556 {
7557     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7558     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7559     int i;
7560 
7561     for (i = 0; i < bprm->argc; i++) {
7562         size_t len = strlen(bprm->argv[i]) + 1;
7563 
7564         if (write(fd, bprm->argv[i], len) != len) {
7565             return -1;
7566         }
7567     }
7568 
7569     return 0;
7570 }
7571 
7572 static int open_self_maps(void *cpu_env, int fd)
7573 {
7574     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7575     TaskState *ts = cpu->opaque;
7576     FILE *fp;
7577     char *line = NULL;
7578     size_t len = 0;
7579     ssize_t read;
7580 
7581     fp = fopen("/proc/self/maps", "r");
7582     if (fp == NULL) {
7583         return -1;
7584     }
7585 
7586     while ((read = getline(&line, &len, fp)) != -1) {
7587         int fields, dev_maj, dev_min, inode;
7588         uint64_t min, max, offset;
7589         char flag_r, flag_w, flag_x, flag_p;
7590         char path[512] = "";
7591         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7592                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7593                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7594 
7595         if ((fields < 10) || (fields > 11)) {
7596             continue;
7597         }
7598         if (h2g_valid(min)) {
7599             int flags = page_get_flags(h2g(min));
7600             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7601             if (page_check_range(h2g(min), max - min, flags) == -1) {
7602                 continue;
7603             }
7604             if (h2g(min) == ts->info->stack_limit) {
7605                 pstrcpy(path, sizeof(path), "      [stack]");
7606             }
7607             dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7608                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7609                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7610                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7611                     path[0] ? "         " : "", path);
7612         }
7613     }
7614 
7615     free(line);
7616     fclose(fp);
7617 
7618     return 0;
7619 }
7620 
7621 static int open_self_stat(void *cpu_env, int fd)
7622 {
7623     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7624     TaskState *ts = cpu->opaque;
7625     abi_ulong start_stack = ts->info->start_stack;
7626     int i;
7627 
7628     for (i = 0; i < 44; i++) {
7629       char buf[128];
7630       int len;
7631       uint64_t val = 0;
7632 
7633       if (i == 0) {
7634         /* pid */
7635         val = getpid();
7636         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7637       } else if (i == 1) {
7638         /* app name */
7639         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7640       } else if (i == 27) {
7641         /* stack bottom */
7642         val = start_stack;
7643         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7644       } else {
7645         /* for the rest, there is MasterCard */
7646         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7647       }
7648 
7649       len = strlen(buf);
7650       if (write(fd, buf, len) != len) {
7651           return -1;
7652       }
7653     }
7654 
7655     return 0;
7656 }
7657 
7658 static int open_self_auxv(void *cpu_env, int fd)
7659 {
7660     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7661     TaskState *ts = cpu->opaque;
7662     abi_ulong auxv = ts->info->saved_auxv;
7663     abi_ulong len = ts->info->auxv_len;
7664     char *ptr;
7665 
7666     /*
7667      * Auxiliary vector is stored in target process stack.
7668      * read in whole auxv vector and copy it to file
7669      */
7670     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7671     if (ptr != NULL) {
7672         while (len > 0) {
7673             ssize_t r;
7674             r = write(fd, ptr, len);
7675             if (r <= 0) {
7676                 break;
7677             }
7678             len -= r;
7679             ptr += r;
7680         }
7681         lseek(fd, 0, SEEK_SET);
7682         unlock_user(ptr, auxv, len);
7683     }
7684 
7685     return 0;
7686 }
7687 
7688 static int is_proc_myself(const char *filename, const char *entry)
7689 {
7690     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7691         filename += strlen("/proc/");
7692         if (!strncmp(filename, "self/", strlen("self/"))) {
7693             filename += strlen("self/");
7694         } else if (*filename >= '1' && *filename <= '9') {
7695             char myself[80];
7696             snprintf(myself, sizeof(myself), "%d/", getpid());
7697             if (!strncmp(filename, myself, strlen(myself))) {
7698                 filename += strlen(myself);
7699             } else {
7700                 return 0;
7701             }
7702         } else {
7703             return 0;
7704         }
7705         if (!strcmp(filename, entry)) {
7706             return 1;
7707         }
7708     }
7709     return 0;
7710 }
7711 
7712 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7713 static int is_proc(const char *filename, const char *entry)
7714 {
7715     return strcmp(filename, entry) == 0;
7716 }
7717 
7718 static int open_net_route(void *cpu_env, int fd)
7719 {
7720     FILE *fp;
7721     char *line = NULL;
7722     size_t len = 0;
7723     ssize_t read;
7724 
7725     fp = fopen("/proc/net/route", "r");
7726     if (fp == NULL) {
7727         return -1;
7728     }
7729 
7730     /* read header */
7731 
7732     read = getline(&line, &len, fp);
7733     dprintf(fd, "%s", line);
7734 
7735     /* read routes */
7736 
7737     while ((read = getline(&line, &len, fp)) != -1) {
7738         char iface[16];
7739         uint32_t dest, gw, mask;
7740         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7741         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7742                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7743                      &mask, &mtu, &window, &irtt);
7744         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7745                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7746                 metric, tswap32(mask), mtu, window, irtt);
7747     }
7748 
7749     free(line);
7750     fclose(fp);
7751 
7752     return 0;
7753 }
7754 #endif
7755 
7756 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7757 {
7758     struct fake_open {
7759         const char *filename;
7760         int (*fill)(void *cpu_env, int fd);
7761         int (*cmp)(const char *s1, const char *s2);
7762     };
7763     const struct fake_open *fake_open;
7764     static const struct fake_open fakes[] = {
7765         { "maps", open_self_maps, is_proc_myself },
7766         { "stat", open_self_stat, is_proc_myself },
7767         { "auxv", open_self_auxv, is_proc_myself },
7768         { "cmdline", open_self_cmdline, is_proc_myself },
7769 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7770         { "/proc/net/route", open_net_route, is_proc },
7771 #endif
7772         { NULL, NULL, NULL }
7773     };
7774 
7775     if (is_proc_myself(pathname, "exe")) {
7776         int execfd = qemu_getauxval(AT_EXECFD);
7777         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7778     }
7779 
7780     for (fake_open = fakes; fake_open->filename; fake_open++) {
7781         if (fake_open->cmp(pathname, fake_open->filename)) {
7782             break;
7783         }
7784     }
7785 
7786     if (fake_open->filename) {
7787         const char *tmpdir;
7788         char filename[PATH_MAX];
7789         int fd, r;
7790 
7791         /* create temporary file to map stat to */
7792         tmpdir = getenv("TMPDIR");
7793         if (!tmpdir)
7794             tmpdir = "/tmp";
7795         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7796         fd = mkstemp(filename);
7797         if (fd < 0) {
7798             return fd;
7799         }
7800         unlink(filename);
7801 
7802         if ((r = fake_open->fill(cpu_env, fd))) {
7803             int e = errno;
7804             close(fd);
7805             errno = e;
7806             return r;
7807         }
7808         lseek(fd, 0, SEEK_SET);
7809 
7810         return fd;
7811     }
7812 
7813     return safe_openat(dirfd, path(pathname), flags, mode);
7814 }
7815 
7816 #define TIMER_MAGIC 0x0caf0000
7817 #define TIMER_MAGIC_MASK 0xffff0000
7818 
7819 /* Convert QEMU provided timer ID back to internal 16bit index format */
7820 static target_timer_t get_timer_id(abi_long arg)
7821 {
7822     target_timer_t timerid = arg;
7823 
7824     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7825         return -TARGET_EINVAL;
7826     }
7827 
7828     timerid &= 0xffff;
7829 
7830     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7831         return -TARGET_EINVAL;
7832     }
7833 
7834     return timerid;
7835 }
7836 
7837 static abi_long swap_data_eventfd(void *buf, size_t len)
7838 {
7839     uint64_t *counter = buf;
7840     int i;
7841 
7842     if (len < sizeof(uint64_t)) {
7843         return -EINVAL;
7844     }
7845 
7846     for (i = 0; i < len; i += sizeof(uint64_t)) {
7847         *counter = tswap64(*counter);
7848         counter++;
7849     }
7850 
7851     return len;
7852 }
7853 
7854 static TargetFdTrans target_eventfd_trans = {
7855     .host_to_target_data = swap_data_eventfd,
7856     .target_to_host_data = swap_data_eventfd,
7857 };
7858 
7859 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7860     (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7861      defined(__NR_inotify_init1))
7862 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7863 {
7864     struct inotify_event *ev;
7865     int i;
7866     uint32_t name_len;
7867 
7868     for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7869         ev = (struct inotify_event *)((char *)buf + i);
7870         name_len = ev->len;
7871 
7872         ev->wd = tswap32(ev->wd);
7873         ev->mask = tswap32(ev->mask);
7874         ev->cookie = tswap32(ev->cookie);
7875         ev->len = tswap32(name_len);
7876     }
7877 
7878     return len;
7879 }
7880 
7881 static TargetFdTrans target_inotify_trans = {
7882     .host_to_target_data = host_to_target_data_inotify,
7883 };
7884 #endif
7885 
7886 static int target_to_host_cpu_mask(unsigned long *host_mask,
7887                                    size_t host_size,
7888                                    abi_ulong target_addr,
7889                                    size_t target_size)
7890 {
7891     unsigned target_bits = sizeof(abi_ulong) * 8;
7892     unsigned host_bits = sizeof(*host_mask) * 8;
7893     abi_ulong *target_mask;
7894     unsigned i, j;
7895 
7896     assert(host_size >= target_size);
7897 
7898     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7899     if (!target_mask) {
7900         return -TARGET_EFAULT;
7901     }
7902     memset(host_mask, 0, host_size);
7903 
7904     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7905         unsigned bit = i * target_bits;
7906         abi_ulong val;
7907 
7908         __get_user(val, &target_mask[i]);
7909         for (j = 0; j < target_bits; j++, bit++) {
7910             if (val & (1UL << j)) {
7911                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7912             }
7913         }
7914     }
7915 
7916     unlock_user(target_mask, target_addr, 0);
7917     return 0;
7918 }
7919 
7920 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7921                                    size_t host_size,
7922                                    abi_ulong target_addr,
7923                                    size_t target_size)
7924 {
7925     unsigned target_bits = sizeof(abi_ulong) * 8;
7926     unsigned host_bits = sizeof(*host_mask) * 8;
7927     abi_ulong *target_mask;
7928     unsigned i, j;
7929 
7930     assert(host_size >= target_size);
7931 
7932     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7933     if (!target_mask) {
7934         return -TARGET_EFAULT;
7935     }
7936 
7937     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7938         unsigned bit = i * target_bits;
7939         abi_ulong val = 0;
7940 
7941         for (j = 0; j < target_bits; j++, bit++) {
7942             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7943                 val |= 1UL << j;
7944             }
7945         }
7946         __put_user(val, &target_mask[i]);
7947     }
7948 
7949     unlock_user(target_mask, target_addr, target_size);
7950     return 0;
7951 }
7952 
7953 /* do_syscall() should always have a single exit point at the end so
7954    that actions, such as logging of syscall results, can be performed.
7955    All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7956 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7957                     abi_long arg2, abi_long arg3, abi_long arg4,
7958                     abi_long arg5, abi_long arg6, abi_long arg7,
7959                     abi_long arg8)
7960 {
7961     CPUState *cpu = ENV_GET_CPU(cpu_env);
7962     abi_long ret;
7963     struct stat st;
7964     struct statfs stfs;
7965     void *p;
7966 
7967 #if defined(DEBUG_ERESTARTSYS)
7968     /* Debug-only code for exercising the syscall-restart code paths
7969      * in the per-architecture cpu main loops: restart every syscall
7970      * the guest makes once before letting it through.
7971      */
7972     {
7973         static int flag;
7974 
7975         flag = !flag;
7976         if (flag) {
7977             return -TARGET_ERESTARTSYS;
7978         }
7979     }
7980 #endif
7981 
7982 #ifdef DEBUG
7983     gemu_log("syscall %d", num);
7984 #endif
7985     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7986     if(do_strace)
7987         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7988 
7989     switch(num) {
7990     case TARGET_NR_exit:
7991         /* In old applications this may be used to implement _exit(2).
7992            However in threaded applictions it is used for thread termination,
7993            and _exit_group is used for application termination.
7994            Do thread termination if we have more then one thread.  */
7995 
7996         if (block_signals()) {
7997             ret = -TARGET_ERESTARTSYS;
7998             break;
7999         }
8000 
8001         cpu_list_lock();
8002 
8003         if (CPU_NEXT(first_cpu)) {
8004             TaskState *ts;
8005 
8006             /* Remove the CPU from the list.  */
8007             QTAILQ_REMOVE(&cpus, cpu, node);
8008 
8009             cpu_list_unlock();
8010 
8011             ts = cpu->opaque;
8012             if (ts->child_tidptr) {
8013                 put_user_u32(0, ts->child_tidptr);
8014                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8015                           NULL, NULL, 0);
8016             }
8017             thread_cpu = NULL;
8018             object_unref(OBJECT(cpu));
8019             g_free(ts);
8020             rcu_unregister_thread();
8021             pthread_exit(NULL);
8022         }
8023 
8024         cpu_list_unlock();
8025 #ifdef TARGET_GPROF
8026         _mcleanup();
8027 #endif
8028         gdb_exit(cpu_env, arg1);
8029         _exit(arg1);
8030         ret = 0; /* avoid warning */
8031         break;
8032     case TARGET_NR_read:
8033         if (arg3 == 0)
8034             ret = 0;
8035         else {
8036             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8037                 goto efault;
8038             ret = get_errno(safe_read(arg1, p, arg3));
8039             if (ret >= 0 &&
8040                 fd_trans_host_to_target_data(arg1)) {
8041                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8042             }
8043             unlock_user(p, arg2, ret);
8044         }
8045         break;
8046     case TARGET_NR_write:
8047         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8048             goto efault;
8049         if (fd_trans_target_to_host_data(arg1)) {
8050             void *copy = g_malloc(arg3);
8051             memcpy(copy, p, arg3);
8052             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8053             if (ret >= 0) {
8054                 ret = get_errno(safe_write(arg1, copy, ret));
8055             }
8056             g_free(copy);
8057         } else {
8058             ret = get_errno(safe_write(arg1, p, arg3));
8059         }
8060         unlock_user(p, arg2, 0);
8061         break;
8062 #ifdef TARGET_NR_open
8063     case TARGET_NR_open:
8064         if (!(p = lock_user_string(arg1)))
8065             goto efault;
8066         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8067                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8068                                   arg3));
8069         fd_trans_unregister(ret);
8070         unlock_user(p, arg1, 0);
8071         break;
8072 #endif
8073     case TARGET_NR_openat:
8074         if (!(p = lock_user_string(arg2)))
8075             goto efault;
8076         ret = get_errno(do_openat(cpu_env, arg1, p,
8077                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8078                                   arg4));
8079         fd_trans_unregister(ret);
8080         unlock_user(p, arg2, 0);
8081         break;
8082 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8083     case TARGET_NR_name_to_handle_at:
8084         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8085         break;
8086 #endif
8087 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8088     case TARGET_NR_open_by_handle_at:
8089         ret = do_open_by_handle_at(arg1, arg2, arg3);
8090         fd_trans_unregister(ret);
8091         break;
8092 #endif
8093     case TARGET_NR_close:
8094         fd_trans_unregister(arg1);
8095         ret = get_errno(close(arg1));
8096         break;
8097     case TARGET_NR_brk:
8098         ret = do_brk(arg1);
8099         break;
8100 #ifdef TARGET_NR_fork
8101     case TARGET_NR_fork:
8102         ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8103         break;
8104 #endif
8105 #ifdef TARGET_NR_waitpid
8106     case TARGET_NR_waitpid:
8107         {
8108             int status;
8109             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8110             if (!is_error(ret) && arg2 && ret
8111                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8112                 goto efault;
8113         }
8114         break;
8115 #endif
8116 #ifdef TARGET_NR_waitid
8117     case TARGET_NR_waitid:
8118         {
8119             siginfo_t info;
8120             info.si_pid = 0;
8121             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8122             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8123                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8124                     goto efault;
8125                 host_to_target_siginfo(p, &info);
8126                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8127             }
8128         }
8129         break;
8130 #endif
8131 #ifdef TARGET_NR_creat /* not on alpha */
8132     case TARGET_NR_creat:
8133         if (!(p = lock_user_string(arg1)))
8134             goto efault;
8135         ret = get_errno(creat(p, arg2));
8136         fd_trans_unregister(ret);
8137         unlock_user(p, arg1, 0);
8138         break;
8139 #endif
8140 #ifdef TARGET_NR_link
8141     case TARGET_NR_link:
8142         {
8143             void * p2;
8144             p = lock_user_string(arg1);
8145             p2 = lock_user_string(arg2);
8146             if (!p || !p2)
8147                 ret = -TARGET_EFAULT;
8148             else
8149                 ret = get_errno(link(p, p2));
8150             unlock_user(p2, arg2, 0);
8151             unlock_user(p, arg1, 0);
8152         }
8153         break;
8154 #endif
8155 #if defined(TARGET_NR_linkat)
8156     case TARGET_NR_linkat:
8157         {
8158             void * p2 = NULL;
8159             if (!arg2 || !arg4)
8160                 goto efault;
8161             p  = lock_user_string(arg2);
8162             p2 = lock_user_string(arg4);
8163             if (!p || !p2)
8164                 ret = -TARGET_EFAULT;
8165             else
8166                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8167             unlock_user(p, arg2, 0);
8168             unlock_user(p2, arg4, 0);
8169         }
8170         break;
8171 #endif
8172 #ifdef TARGET_NR_unlink
8173     case TARGET_NR_unlink:
8174         if (!(p = lock_user_string(arg1)))
8175             goto efault;
8176         ret = get_errno(unlink(p));
8177         unlock_user(p, arg1, 0);
8178         break;
8179 #endif
8180 #if defined(TARGET_NR_unlinkat)
8181     case TARGET_NR_unlinkat:
8182         if (!(p = lock_user_string(arg2)))
8183             goto efault;
8184         ret = get_errno(unlinkat(arg1, p, arg3));
8185         unlock_user(p, arg2, 0);
8186         break;
8187 #endif
8188     case TARGET_NR_execve:
8189         {
8190             char **argp, **envp;
8191             int argc, envc;
8192             abi_ulong gp;
8193             abi_ulong guest_argp;
8194             abi_ulong guest_envp;
8195             abi_ulong addr;
8196             char **q;
8197             int total_size = 0;
8198 
8199             argc = 0;
8200             guest_argp = arg2;
8201             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8202                 if (get_user_ual(addr, gp))
8203                     goto efault;
8204                 if (!addr)
8205                     break;
8206                 argc++;
8207             }
8208             envc = 0;
8209             guest_envp = arg3;
8210             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8211                 if (get_user_ual(addr, gp))
8212                     goto efault;
8213                 if (!addr)
8214                     break;
8215                 envc++;
8216             }
8217 
8218             argp = g_new0(char *, argc + 1);
8219             envp = g_new0(char *, envc + 1);
8220 
8221             for (gp = guest_argp, q = argp; gp;
8222                   gp += sizeof(abi_ulong), q++) {
8223                 if (get_user_ual(addr, gp))
8224                     goto execve_efault;
8225                 if (!addr)
8226                     break;
8227                 if (!(*q = lock_user_string(addr)))
8228                     goto execve_efault;
8229                 total_size += strlen(*q) + 1;
8230             }
8231             *q = NULL;
8232 
8233             for (gp = guest_envp, q = envp; gp;
8234                   gp += sizeof(abi_ulong), q++) {
8235                 if (get_user_ual(addr, gp))
8236                     goto execve_efault;
8237                 if (!addr)
8238                     break;
8239                 if (!(*q = lock_user_string(addr)))
8240                     goto execve_efault;
8241                 total_size += strlen(*q) + 1;
8242             }
8243             *q = NULL;
8244 
8245             if (!(p = lock_user_string(arg1)))
8246                 goto execve_efault;
8247             /* Although execve() is not an interruptible syscall it is
8248              * a special case where we must use the safe_syscall wrapper:
8249              * if we allow a signal to happen before we make the host
8250              * syscall then we will 'lose' it, because at the point of
8251              * execve the process leaves QEMU's control. So we use the
8252              * safe syscall wrapper to ensure that we either take the
8253              * signal as a guest signal, or else it does not happen
8254              * before the execve completes and makes it the other
8255              * program's problem.
8256              */
8257             ret = get_errno(safe_execve(p, argp, envp));
8258             unlock_user(p, arg1, 0);
8259 
8260             goto execve_end;
8261 
8262         execve_efault:
8263             ret = -TARGET_EFAULT;
8264 
8265         execve_end:
8266             for (gp = guest_argp, q = argp; *q;
8267                   gp += sizeof(abi_ulong), q++) {
8268                 if (get_user_ual(addr, gp)
8269                     || !addr)
8270                     break;
8271                 unlock_user(*q, addr, 0);
8272             }
8273             for (gp = guest_envp, q = envp; *q;
8274                   gp += sizeof(abi_ulong), q++) {
8275                 if (get_user_ual(addr, gp)
8276                     || !addr)
8277                     break;
8278                 unlock_user(*q, addr, 0);
8279             }
8280 
8281             g_free(argp);
8282             g_free(envp);
8283         }
8284         break;
8285     case TARGET_NR_chdir:
8286         if (!(p = lock_user_string(arg1)))
8287             goto efault;
8288         ret = get_errno(chdir(p));
8289         unlock_user(p, arg1, 0);
8290         break;
8291 #ifdef TARGET_NR_time
8292     case TARGET_NR_time:
8293         {
8294             time_t host_time;
8295             ret = get_errno(time(&host_time));
8296             if (!is_error(ret)
8297                 && arg1
8298                 && put_user_sal(host_time, arg1))
8299                 goto efault;
8300         }
8301         break;
8302 #endif
8303 #ifdef TARGET_NR_mknod
8304     case TARGET_NR_mknod:
8305         if (!(p = lock_user_string(arg1)))
8306             goto efault;
8307         ret = get_errno(mknod(p, arg2, arg3));
8308         unlock_user(p, arg1, 0);
8309         break;
8310 #endif
8311 #if defined(TARGET_NR_mknodat)
8312     case TARGET_NR_mknodat:
8313         if (!(p = lock_user_string(arg2)))
8314             goto efault;
8315         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8316         unlock_user(p, arg2, 0);
8317         break;
8318 #endif
8319 #ifdef TARGET_NR_chmod
8320     case TARGET_NR_chmod:
8321         if (!(p = lock_user_string(arg1)))
8322             goto efault;
8323         ret = get_errno(chmod(p, arg2));
8324         unlock_user(p, arg1, 0);
8325         break;
8326 #endif
8327 #ifdef TARGET_NR_break
8328     case TARGET_NR_break:
8329         goto unimplemented;
8330 #endif
8331 #ifdef TARGET_NR_oldstat
8332     case TARGET_NR_oldstat:
8333         goto unimplemented;
8334 #endif
8335     case TARGET_NR_lseek:
8336         ret = get_errno(lseek(arg1, arg2, arg3));
8337         break;
8338 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8339     /* Alpha specific */
8340     case TARGET_NR_getxpid:
8341         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8342         ret = get_errno(getpid());
8343         break;
8344 #endif
8345 #ifdef TARGET_NR_getpid
8346     case TARGET_NR_getpid:
8347         ret = get_errno(getpid());
8348         break;
8349 #endif
8350     case TARGET_NR_mount:
8351         {
8352             /* need to look at the data field */
8353             void *p2, *p3;
8354 
8355             if (arg1) {
8356                 p = lock_user_string(arg1);
8357                 if (!p) {
8358                     goto efault;
8359                 }
8360             } else {
8361                 p = NULL;
8362             }
8363 
8364             p2 = lock_user_string(arg2);
8365             if (!p2) {
8366                 if (arg1) {
8367                     unlock_user(p, arg1, 0);
8368                 }
8369                 goto efault;
8370             }
8371 
8372             if (arg3) {
8373                 p3 = lock_user_string(arg3);
8374                 if (!p3) {
8375                     if (arg1) {
8376                         unlock_user(p, arg1, 0);
8377                     }
8378                     unlock_user(p2, arg2, 0);
8379                     goto efault;
8380                 }
8381             } else {
8382                 p3 = NULL;
8383             }
8384 
8385             /* FIXME - arg5 should be locked, but it isn't clear how to
8386              * do that since it's not guaranteed to be a NULL-terminated
8387              * string.
8388              */
8389             if (!arg5) {
8390                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8391             } else {
8392                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8393             }
8394             ret = get_errno(ret);
8395 
8396             if (arg1) {
8397                 unlock_user(p, arg1, 0);
8398             }
8399             unlock_user(p2, arg2, 0);
8400             if (arg3) {
8401                 unlock_user(p3, arg3, 0);
8402             }
8403         }
8404         break;
8405 #ifdef TARGET_NR_umount
8406     case TARGET_NR_umount:
8407         if (!(p = lock_user_string(arg1)))
8408             goto efault;
8409         ret = get_errno(umount(p));
8410         unlock_user(p, arg1, 0);
8411         break;
8412 #endif
8413 #ifdef TARGET_NR_stime /* not on alpha */
8414     case TARGET_NR_stime:
8415         {
8416             time_t host_time;
8417             if (get_user_sal(host_time, arg1))
8418                 goto efault;
8419             ret = get_errno(stime(&host_time));
8420         }
8421         break;
8422 #endif
8423     case TARGET_NR_ptrace:
8424         goto unimplemented;
8425 #ifdef TARGET_NR_alarm /* not on alpha */
8426     case TARGET_NR_alarm:
8427         ret = alarm(arg1);
8428         break;
8429 #endif
8430 #ifdef TARGET_NR_oldfstat
8431     case TARGET_NR_oldfstat:
8432         goto unimplemented;
8433 #endif
8434 #ifdef TARGET_NR_pause /* not on alpha */
8435     case TARGET_NR_pause:
8436         if (!block_signals()) {
8437             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8438         }
8439         ret = -TARGET_EINTR;
8440         break;
8441 #endif
8442 #ifdef TARGET_NR_utime
8443     case TARGET_NR_utime:
8444         {
8445             struct utimbuf tbuf, *host_tbuf;
8446             struct target_utimbuf *target_tbuf;
8447             if (arg2) {
8448                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8449                     goto efault;
8450                 tbuf.actime = tswapal(target_tbuf->actime);
8451                 tbuf.modtime = tswapal(target_tbuf->modtime);
8452                 unlock_user_struct(target_tbuf, arg2, 0);
8453                 host_tbuf = &tbuf;
8454             } else {
8455                 host_tbuf = NULL;
8456             }
8457             if (!(p = lock_user_string(arg1)))
8458                 goto efault;
8459             ret = get_errno(utime(p, host_tbuf));
8460             unlock_user(p, arg1, 0);
8461         }
8462         break;
8463 #endif
8464 #ifdef TARGET_NR_utimes
8465     case TARGET_NR_utimes:
8466         {
8467             struct timeval *tvp, tv[2];
8468             if (arg2) {
8469                 if (copy_from_user_timeval(&tv[0], arg2)
8470                     || copy_from_user_timeval(&tv[1],
8471                                               arg2 + sizeof(struct target_timeval)))
8472                     goto efault;
8473                 tvp = tv;
8474             } else {
8475                 tvp = NULL;
8476             }
8477             if (!(p = lock_user_string(arg1)))
8478                 goto efault;
8479             ret = get_errno(utimes(p, tvp));
8480             unlock_user(p, arg1, 0);
8481         }
8482         break;
8483 #endif
8484 #if defined(TARGET_NR_futimesat)
8485     case TARGET_NR_futimesat:
8486         {
8487             struct timeval *tvp, tv[2];
8488             if (arg3) {
8489                 if (copy_from_user_timeval(&tv[0], arg3)
8490                     || copy_from_user_timeval(&tv[1],
8491                                               arg3 + sizeof(struct target_timeval)))
8492                     goto efault;
8493                 tvp = tv;
8494             } else {
8495                 tvp = NULL;
8496             }
8497             if (!(p = lock_user_string(arg2)))
8498                 goto efault;
8499             ret = get_errno(futimesat(arg1, path(p), tvp));
8500             unlock_user(p, arg2, 0);
8501         }
8502         break;
8503 #endif
8504 #ifdef TARGET_NR_stty
8505     case TARGET_NR_stty:
8506         goto unimplemented;
8507 #endif
8508 #ifdef TARGET_NR_gtty
8509     case TARGET_NR_gtty:
8510         goto unimplemented;
8511 #endif
8512 #ifdef TARGET_NR_access
8513     case TARGET_NR_access:
8514         if (!(p = lock_user_string(arg1)))
8515             goto efault;
8516         ret = get_errno(access(path(p), arg2));
8517         unlock_user(p, arg1, 0);
8518         break;
8519 #endif
8520 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8521     case TARGET_NR_faccessat:
8522         if (!(p = lock_user_string(arg2)))
8523             goto efault;
8524         ret = get_errno(faccessat(arg1, p, arg3, 0));
8525         unlock_user(p, arg2, 0);
8526         break;
8527 #endif
8528 #ifdef TARGET_NR_nice /* not on alpha */
8529     case TARGET_NR_nice:
8530         ret = get_errno(nice(arg1));
8531         break;
8532 #endif
8533 #ifdef TARGET_NR_ftime
8534     case TARGET_NR_ftime:
8535         goto unimplemented;
8536 #endif
8537     case TARGET_NR_sync:
8538         sync();
8539         ret = 0;
8540         break;
8541 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8542     case TARGET_NR_syncfs:
8543         ret = get_errno(syncfs(arg1));
8544         break;
8545 #endif
8546     case TARGET_NR_kill:
8547         ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8548         break;
8549 #ifdef TARGET_NR_rename
8550     case TARGET_NR_rename:
8551         {
8552             void *p2;
8553             p = lock_user_string(arg1);
8554             p2 = lock_user_string(arg2);
8555             if (!p || !p2)
8556                 ret = -TARGET_EFAULT;
8557             else
8558                 ret = get_errno(rename(p, p2));
8559             unlock_user(p2, arg2, 0);
8560             unlock_user(p, arg1, 0);
8561         }
8562         break;
8563 #endif
8564 #if defined(TARGET_NR_renameat)
8565     case TARGET_NR_renameat:
8566         {
8567             void *p2;
8568             p  = lock_user_string(arg2);
8569             p2 = lock_user_string(arg4);
8570             if (!p || !p2)
8571                 ret = -TARGET_EFAULT;
8572             else
8573                 ret = get_errno(renameat(arg1, p, arg3, p2));
8574             unlock_user(p2, arg4, 0);
8575             unlock_user(p, arg2, 0);
8576         }
8577         break;
8578 #endif
8579 #if defined(TARGET_NR_renameat2)
8580     case TARGET_NR_renameat2:
8581         {
8582             void *p2;
8583             p  = lock_user_string(arg2);
8584             p2 = lock_user_string(arg4);
8585             if (!p || !p2) {
8586                 ret = -TARGET_EFAULT;
8587             } else {
8588                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8589             }
8590             unlock_user(p2, arg4, 0);
8591             unlock_user(p, arg2, 0);
8592         }
8593         break;
8594 #endif
8595 #ifdef TARGET_NR_mkdir
8596     case TARGET_NR_mkdir:
8597         if (!(p = lock_user_string(arg1)))
8598             goto efault;
8599         ret = get_errno(mkdir(p, arg2));
8600         unlock_user(p, arg1, 0);
8601         break;
8602 #endif
8603 #if defined(TARGET_NR_mkdirat)
8604     case TARGET_NR_mkdirat:
8605         if (!(p = lock_user_string(arg2)))
8606             goto efault;
8607         ret = get_errno(mkdirat(arg1, p, arg3));
8608         unlock_user(p, arg2, 0);
8609         break;
8610 #endif
8611 #ifdef TARGET_NR_rmdir
8612     case TARGET_NR_rmdir:
8613         if (!(p = lock_user_string(arg1)))
8614             goto efault;
8615         ret = get_errno(rmdir(p));
8616         unlock_user(p, arg1, 0);
8617         break;
8618 #endif
8619     case TARGET_NR_dup:
8620         ret = get_errno(dup(arg1));
8621         if (ret >= 0) {
8622             fd_trans_dup(arg1, ret);
8623         }
8624         break;
8625 #ifdef TARGET_NR_pipe
8626     case TARGET_NR_pipe:
8627         ret = do_pipe(cpu_env, arg1, 0, 0);
8628         break;
8629 #endif
8630 #ifdef TARGET_NR_pipe2
8631     case TARGET_NR_pipe2:
8632         ret = do_pipe(cpu_env, arg1,
8633                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8634         break;
8635 #endif
8636     case TARGET_NR_times:
8637         {
8638             struct target_tms *tmsp;
8639             struct tms tms;
8640             ret = get_errno(times(&tms));
8641             if (arg1) {
8642                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8643                 if (!tmsp)
8644                     goto efault;
8645                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8646                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8647                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8648                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8649             }
8650             if (!is_error(ret))
8651                 ret = host_to_target_clock_t(ret);
8652         }
8653         break;
8654 #ifdef TARGET_NR_prof
8655     case TARGET_NR_prof:
8656         goto unimplemented;
8657 #endif
8658 #ifdef TARGET_NR_signal
8659     case TARGET_NR_signal:
8660         goto unimplemented;
8661 #endif
8662     case TARGET_NR_acct:
8663         if (arg1 == 0) {
8664             ret = get_errno(acct(NULL));
8665         } else {
8666             if (!(p = lock_user_string(arg1)))
8667                 goto efault;
8668             ret = get_errno(acct(path(p)));
8669             unlock_user(p, arg1, 0);
8670         }
8671         break;
8672 #ifdef TARGET_NR_umount2
8673     case TARGET_NR_umount2:
8674         if (!(p = lock_user_string(arg1)))
8675             goto efault;
8676         ret = get_errno(umount2(p, arg2));
8677         unlock_user(p, arg1, 0);
8678         break;
8679 #endif
8680 #ifdef TARGET_NR_lock
8681     case TARGET_NR_lock:
8682         goto unimplemented;
8683 #endif
8684     case TARGET_NR_ioctl:
8685         ret = do_ioctl(arg1, arg2, arg3);
8686         break;
8687 #ifdef TARGET_NR_fcntl
8688     case TARGET_NR_fcntl:
8689         ret = do_fcntl(arg1, arg2, arg3);
8690         break;
8691 #endif
8692 #ifdef TARGET_NR_mpx
8693     case TARGET_NR_mpx:
8694         goto unimplemented;
8695 #endif
8696     case TARGET_NR_setpgid:
8697         ret = get_errno(setpgid(arg1, arg2));
8698         break;
8699 #ifdef TARGET_NR_ulimit
8700     case TARGET_NR_ulimit:
8701         goto unimplemented;
8702 #endif
8703 #ifdef TARGET_NR_oldolduname
8704     case TARGET_NR_oldolduname:
8705         goto unimplemented;
8706 #endif
8707     case TARGET_NR_umask:
8708         ret = get_errno(umask(arg1));
8709         break;
8710     case TARGET_NR_chroot:
8711         if (!(p = lock_user_string(arg1)))
8712             goto efault;
8713         ret = get_errno(chroot(p));
8714         unlock_user(p, arg1, 0);
8715         break;
8716 #ifdef TARGET_NR_ustat
8717     case TARGET_NR_ustat:
8718         goto unimplemented;
8719 #endif
8720 #ifdef TARGET_NR_dup2
8721     case TARGET_NR_dup2:
8722         ret = get_errno(dup2(arg1, arg2));
8723         if (ret >= 0) {
8724             fd_trans_dup(arg1, arg2);
8725         }
8726         break;
8727 #endif
8728 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8729     case TARGET_NR_dup3:
8730     {
8731         int host_flags;
8732 
8733         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8734             return -EINVAL;
8735         }
8736         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8737         ret = get_errno(dup3(arg1, arg2, host_flags));
8738         if (ret >= 0) {
8739             fd_trans_dup(arg1, arg2);
8740         }
8741         break;
8742     }
8743 #endif
8744 #ifdef TARGET_NR_getppid /* not on alpha */
8745     case TARGET_NR_getppid:
8746         ret = get_errno(getppid());
8747         break;
8748 #endif
8749 #ifdef TARGET_NR_getpgrp
8750     case TARGET_NR_getpgrp:
8751         ret = get_errno(getpgrp());
8752         break;
8753 #endif
8754     case TARGET_NR_setsid:
8755         ret = get_errno(setsid());
8756         break;
8757 #ifdef TARGET_NR_sigaction
8758     case TARGET_NR_sigaction:
8759         {
8760 #if defined(TARGET_ALPHA)
8761             struct target_sigaction act, oact, *pact = 0;
8762             struct target_old_sigaction *old_act;
8763             if (arg2) {
8764                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8765                     goto efault;
8766                 act._sa_handler = old_act->_sa_handler;
8767                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8768                 act.sa_flags = old_act->sa_flags;
8769                 act.sa_restorer = 0;
8770                 unlock_user_struct(old_act, arg2, 0);
8771                 pact = &act;
8772             }
8773             ret = get_errno(do_sigaction(arg1, pact, &oact));
8774             if (!is_error(ret) && arg3) {
8775                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8776                     goto efault;
8777                 old_act->_sa_handler = oact._sa_handler;
8778                 old_act->sa_mask = oact.sa_mask.sig[0];
8779                 old_act->sa_flags = oact.sa_flags;
8780                 unlock_user_struct(old_act, arg3, 1);
8781             }
8782 #elif defined(TARGET_MIPS)
8783 	    struct target_sigaction act, oact, *pact, *old_act;
8784 
8785 	    if (arg2) {
8786                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8787                     goto efault;
8788 		act._sa_handler = old_act->_sa_handler;
8789 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8790 		act.sa_flags = old_act->sa_flags;
8791 		unlock_user_struct(old_act, arg2, 0);
8792 		pact = &act;
8793 	    } else {
8794 		pact = NULL;
8795 	    }
8796 
8797 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8798 
8799 	    if (!is_error(ret) && arg3) {
8800                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8801                     goto efault;
8802 		old_act->_sa_handler = oact._sa_handler;
8803 		old_act->sa_flags = oact.sa_flags;
8804 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8805 		old_act->sa_mask.sig[1] = 0;
8806 		old_act->sa_mask.sig[2] = 0;
8807 		old_act->sa_mask.sig[3] = 0;
8808 		unlock_user_struct(old_act, arg3, 1);
8809 	    }
8810 #else
8811             struct target_old_sigaction *old_act;
8812             struct target_sigaction act, oact, *pact;
8813             if (arg2) {
8814                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8815                     goto efault;
8816                 act._sa_handler = old_act->_sa_handler;
8817                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8818                 act.sa_flags = old_act->sa_flags;
8819                 act.sa_restorer = old_act->sa_restorer;
8820 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8821                 act.ka_restorer = 0;
8822 #endif
8823                 unlock_user_struct(old_act, arg2, 0);
8824                 pact = &act;
8825             } else {
8826                 pact = NULL;
8827             }
8828             ret = get_errno(do_sigaction(arg1, pact, &oact));
8829             if (!is_error(ret) && arg3) {
8830                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8831                     goto efault;
8832                 old_act->_sa_handler = oact._sa_handler;
8833                 old_act->sa_mask = oact.sa_mask.sig[0];
8834                 old_act->sa_flags = oact.sa_flags;
8835                 old_act->sa_restorer = oact.sa_restorer;
8836                 unlock_user_struct(old_act, arg3, 1);
8837             }
8838 #endif
8839         }
8840         break;
8841 #endif
8842     case TARGET_NR_rt_sigaction:
8843         {
8844 #if defined(TARGET_ALPHA)
8845             /* For Alpha and SPARC this is a 5 argument syscall, with
8846              * a 'restorer' parameter which must be copied into the
8847              * sa_restorer field of the sigaction struct.
8848              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8849              * and arg5 is the sigsetsize.
8850              * Alpha also has a separate rt_sigaction struct that it uses
8851              * here; SPARC uses the usual sigaction struct.
8852              */
8853             struct target_rt_sigaction *rt_act;
8854             struct target_sigaction act, oact, *pact = 0;
8855 
8856             if (arg4 != sizeof(target_sigset_t)) {
8857                 ret = -TARGET_EINVAL;
8858                 break;
8859             }
8860             if (arg2) {
8861                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8862                     goto efault;
8863                 act._sa_handler = rt_act->_sa_handler;
8864                 act.sa_mask = rt_act->sa_mask;
8865                 act.sa_flags = rt_act->sa_flags;
8866                 act.sa_restorer = arg5;
8867                 unlock_user_struct(rt_act, arg2, 0);
8868                 pact = &act;
8869             }
8870             ret = get_errno(do_sigaction(arg1, pact, &oact));
8871             if (!is_error(ret) && arg3) {
8872                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8873                     goto efault;
8874                 rt_act->_sa_handler = oact._sa_handler;
8875                 rt_act->sa_mask = oact.sa_mask;
8876                 rt_act->sa_flags = oact.sa_flags;
8877                 unlock_user_struct(rt_act, arg3, 1);
8878             }
8879 #else
8880 #ifdef TARGET_SPARC
8881             target_ulong restorer = arg4;
8882             target_ulong sigsetsize = arg5;
8883 #else
8884             target_ulong sigsetsize = arg4;
8885 #endif
8886             struct target_sigaction *act;
8887             struct target_sigaction *oact;
8888 
8889             if (sigsetsize != sizeof(target_sigset_t)) {
8890                 ret = -TARGET_EINVAL;
8891                 break;
8892             }
8893             if (arg2) {
8894                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8895                     goto efault;
8896                 }
8897 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8898                 act->ka_restorer = restorer;
8899 #endif
8900             } else {
8901                 act = NULL;
8902             }
8903             if (arg3) {
8904                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8905                     ret = -TARGET_EFAULT;
8906                     goto rt_sigaction_fail;
8907                 }
8908             } else
8909                 oact = NULL;
8910             ret = get_errno(do_sigaction(arg1, act, oact));
8911 	rt_sigaction_fail:
8912             if (act)
8913                 unlock_user_struct(act, arg2, 0);
8914             if (oact)
8915                 unlock_user_struct(oact, arg3, 1);
8916 #endif
8917         }
8918         break;
8919 #ifdef TARGET_NR_sgetmask /* not on alpha */
8920     case TARGET_NR_sgetmask:
8921         {
8922             sigset_t cur_set;
8923             abi_ulong target_set;
8924             ret = do_sigprocmask(0, NULL, &cur_set);
8925             if (!ret) {
8926                 host_to_target_old_sigset(&target_set, &cur_set);
8927                 ret = target_set;
8928             }
8929         }
8930         break;
8931 #endif
8932 #ifdef TARGET_NR_ssetmask /* not on alpha */
8933     case TARGET_NR_ssetmask:
8934         {
8935             sigset_t set, oset;
8936             abi_ulong target_set = arg1;
8937             target_to_host_old_sigset(&set, &target_set);
8938             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8939             if (!ret) {
8940                 host_to_target_old_sigset(&target_set, &oset);
8941                 ret = target_set;
8942             }
8943         }
8944         break;
8945 #endif
8946 #ifdef TARGET_NR_sigprocmask
8947     case TARGET_NR_sigprocmask:
8948         {
8949 #if defined(TARGET_ALPHA)
8950             sigset_t set, oldset;
8951             abi_ulong mask;
8952             int how;
8953 
8954             switch (arg1) {
8955             case TARGET_SIG_BLOCK:
8956                 how = SIG_BLOCK;
8957                 break;
8958             case TARGET_SIG_UNBLOCK:
8959                 how = SIG_UNBLOCK;
8960                 break;
8961             case TARGET_SIG_SETMASK:
8962                 how = SIG_SETMASK;
8963                 break;
8964             default:
8965                 ret = -TARGET_EINVAL;
8966                 goto fail;
8967             }
8968             mask = arg2;
8969             target_to_host_old_sigset(&set, &mask);
8970 
8971             ret = do_sigprocmask(how, &set, &oldset);
8972             if (!is_error(ret)) {
8973                 host_to_target_old_sigset(&mask, &oldset);
8974                 ret = mask;
8975                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8976             }
8977 #else
8978             sigset_t set, oldset, *set_ptr;
8979             int how;
8980 
8981             if (arg2) {
8982                 switch (arg1) {
8983                 case TARGET_SIG_BLOCK:
8984                     how = SIG_BLOCK;
8985                     break;
8986                 case TARGET_SIG_UNBLOCK:
8987                     how = SIG_UNBLOCK;
8988                     break;
8989                 case TARGET_SIG_SETMASK:
8990                     how = SIG_SETMASK;
8991                     break;
8992                 default:
8993                     ret = -TARGET_EINVAL;
8994                     goto fail;
8995                 }
8996                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8997                     goto efault;
8998                 target_to_host_old_sigset(&set, p);
8999                 unlock_user(p, arg2, 0);
9000                 set_ptr = &set;
9001             } else {
9002                 how = 0;
9003                 set_ptr = NULL;
9004             }
9005             ret = do_sigprocmask(how, set_ptr, &oldset);
9006             if (!is_error(ret) && arg3) {
9007                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9008                     goto efault;
9009                 host_to_target_old_sigset(p, &oldset);
9010                 unlock_user(p, arg3, sizeof(target_sigset_t));
9011             }
9012 #endif
9013         }
9014         break;
9015 #endif
9016     case TARGET_NR_rt_sigprocmask:
9017         {
9018             int how = arg1;
9019             sigset_t set, oldset, *set_ptr;
9020 
9021             if (arg4 != sizeof(target_sigset_t)) {
9022                 ret = -TARGET_EINVAL;
9023                 break;
9024             }
9025 
9026             if (arg2) {
9027                 switch(how) {
9028                 case TARGET_SIG_BLOCK:
9029                     how = SIG_BLOCK;
9030                     break;
9031                 case TARGET_SIG_UNBLOCK:
9032                     how = SIG_UNBLOCK;
9033                     break;
9034                 case TARGET_SIG_SETMASK:
9035                     how = SIG_SETMASK;
9036                     break;
9037                 default:
9038                     ret = -TARGET_EINVAL;
9039                     goto fail;
9040                 }
9041                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9042                     goto efault;
9043                 target_to_host_sigset(&set, p);
9044                 unlock_user(p, arg2, 0);
9045                 set_ptr = &set;
9046             } else {
9047                 how = 0;
9048                 set_ptr = NULL;
9049             }
9050             ret = do_sigprocmask(how, set_ptr, &oldset);
9051             if (!is_error(ret) && arg3) {
9052                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9053                     goto efault;
9054                 host_to_target_sigset(p, &oldset);
9055                 unlock_user(p, arg3, sizeof(target_sigset_t));
9056             }
9057         }
9058         break;
9059 #ifdef TARGET_NR_sigpending
9060     case TARGET_NR_sigpending:
9061         {
9062             sigset_t set;
9063             ret = get_errno(sigpending(&set));
9064             if (!is_error(ret)) {
9065                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9066                     goto efault;
9067                 host_to_target_old_sigset(p, &set);
9068                 unlock_user(p, arg1, sizeof(target_sigset_t));
9069             }
9070         }
9071         break;
9072 #endif
9073     case TARGET_NR_rt_sigpending:
9074         {
9075             sigset_t set;
9076 
9077             /* Yes, this check is >, not != like most. We follow the kernel's
9078              * logic and it does it like this because it implements
9079              * NR_sigpending through the same code path, and in that case
9080              * the old_sigset_t is smaller in size.
9081              */
9082             if (arg2 > sizeof(target_sigset_t)) {
9083                 ret = -TARGET_EINVAL;
9084                 break;
9085             }
9086 
9087             ret = get_errno(sigpending(&set));
9088             if (!is_error(ret)) {
9089                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9090                     goto efault;
9091                 host_to_target_sigset(p, &set);
9092                 unlock_user(p, arg1, sizeof(target_sigset_t));
9093             }
9094         }
9095         break;
9096 #ifdef TARGET_NR_sigsuspend
9097     case TARGET_NR_sigsuspend:
9098         {
9099             TaskState *ts = cpu->opaque;
9100 #if defined(TARGET_ALPHA)
9101             abi_ulong mask = arg1;
9102             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9103 #else
9104             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9105                 goto efault;
9106             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9107             unlock_user(p, arg1, 0);
9108 #endif
9109             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9110                                                SIGSET_T_SIZE));
9111             if (ret != -TARGET_ERESTARTSYS) {
9112                 ts->in_sigsuspend = 1;
9113             }
9114         }
9115         break;
9116 #endif
9117     case TARGET_NR_rt_sigsuspend:
9118         {
9119             TaskState *ts = cpu->opaque;
9120 
9121             if (arg2 != sizeof(target_sigset_t)) {
9122                 ret = -TARGET_EINVAL;
9123                 break;
9124             }
9125             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9126                 goto efault;
9127             target_to_host_sigset(&ts->sigsuspend_mask, p);
9128             unlock_user(p, arg1, 0);
9129             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9130                                                SIGSET_T_SIZE));
9131             if (ret != -TARGET_ERESTARTSYS) {
9132                 ts->in_sigsuspend = 1;
9133             }
9134         }
9135         break;
9136     case TARGET_NR_rt_sigtimedwait:
9137         {
9138             sigset_t set;
9139             struct timespec uts, *puts;
9140             siginfo_t uinfo;
9141 
9142             if (arg4 != sizeof(target_sigset_t)) {
9143                 ret = -TARGET_EINVAL;
9144                 break;
9145             }
9146 
9147             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9148                 goto efault;
9149             target_to_host_sigset(&set, p);
9150             unlock_user(p, arg1, 0);
9151             if (arg3) {
9152                 puts = &uts;
9153                 target_to_host_timespec(puts, arg3);
9154             } else {
9155                 puts = NULL;
9156             }
9157             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9158                                                  SIGSET_T_SIZE));
9159             if (!is_error(ret)) {
9160                 if (arg2) {
9161                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9162                                   0);
9163                     if (!p) {
9164                         goto efault;
9165                     }
9166                     host_to_target_siginfo(p, &uinfo);
9167                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9168                 }
9169                 ret = host_to_target_signal(ret);
9170             }
9171         }
9172         break;
9173     case TARGET_NR_rt_sigqueueinfo:
9174         {
9175             siginfo_t uinfo;
9176 
9177             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9178             if (!p) {
9179                 goto efault;
9180             }
9181             target_to_host_siginfo(&uinfo, p);
9182             unlock_user(p, arg3, 0);
9183             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9184         }
9185         break;
9186     case TARGET_NR_rt_tgsigqueueinfo:
9187         {
9188             siginfo_t uinfo;
9189 
9190             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9191             if (!p) {
9192                 goto efault;
9193             }
9194             target_to_host_siginfo(&uinfo, p);
9195             unlock_user(p, arg4, 0);
9196             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9197         }
9198         break;
9199 #ifdef TARGET_NR_sigreturn
9200     case TARGET_NR_sigreturn:
9201         if (block_signals()) {
9202             ret = -TARGET_ERESTARTSYS;
9203         } else {
9204             ret = do_sigreturn(cpu_env);
9205         }
9206         break;
9207 #endif
9208     case TARGET_NR_rt_sigreturn:
9209         if (block_signals()) {
9210             ret = -TARGET_ERESTARTSYS;
9211         } else {
9212             ret = do_rt_sigreturn(cpu_env);
9213         }
9214         break;
9215     case TARGET_NR_sethostname:
9216         if (!(p = lock_user_string(arg1)))
9217             goto efault;
9218         ret = get_errno(sethostname(p, arg2));
9219         unlock_user(p, arg1, 0);
9220         break;
9221     case TARGET_NR_setrlimit:
9222         {
9223             int resource = target_to_host_resource(arg1);
9224             struct target_rlimit *target_rlim;
9225             struct rlimit rlim;
9226             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9227                 goto efault;
9228             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9229             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9230             unlock_user_struct(target_rlim, arg2, 0);
9231             ret = get_errno(setrlimit(resource, &rlim));
9232         }
9233         break;
9234     case TARGET_NR_getrlimit:
9235         {
9236             int resource = target_to_host_resource(arg1);
9237             struct target_rlimit *target_rlim;
9238             struct rlimit rlim;
9239 
9240             ret = get_errno(getrlimit(resource, &rlim));
9241             if (!is_error(ret)) {
9242                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9243                     goto efault;
9244                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9245                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9246                 unlock_user_struct(target_rlim, arg2, 1);
9247             }
9248         }
9249         break;
9250     case TARGET_NR_getrusage:
9251         {
9252             struct rusage rusage;
9253             ret = get_errno(getrusage(arg1, &rusage));
9254             if (!is_error(ret)) {
9255                 ret = host_to_target_rusage(arg2, &rusage);
9256             }
9257         }
9258         break;
9259     case TARGET_NR_gettimeofday:
9260         {
9261             struct timeval tv;
9262             ret = get_errno(gettimeofday(&tv, NULL));
9263             if (!is_error(ret)) {
9264                 if (copy_to_user_timeval(arg1, &tv))
9265                     goto efault;
9266             }
9267         }
9268         break;
9269     case TARGET_NR_settimeofday:
9270         {
9271             struct timeval tv, *ptv = NULL;
9272             struct timezone tz, *ptz = NULL;
9273 
9274             if (arg1) {
9275                 if (copy_from_user_timeval(&tv, arg1)) {
9276                     goto efault;
9277                 }
9278                 ptv = &tv;
9279             }
9280 
9281             if (arg2) {
9282                 if (copy_from_user_timezone(&tz, arg2)) {
9283                     goto efault;
9284                 }
9285                 ptz = &tz;
9286             }
9287 
9288             ret = get_errno(settimeofday(ptv, ptz));
9289         }
9290         break;
9291 #if defined(TARGET_NR_select)
9292     case TARGET_NR_select:
9293 #if defined(TARGET_WANT_NI_OLD_SELECT)
9294         /* some architectures used to have old_select here
9295          * but now ENOSYS it.
9296          */
9297         ret = -TARGET_ENOSYS;
9298 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9299         ret = do_old_select(arg1);
9300 #else
9301         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9302 #endif
9303         break;
9304 #endif
9305 #ifdef TARGET_NR_pselect6
9306     case TARGET_NR_pselect6:
9307         {
9308             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9309             fd_set rfds, wfds, efds;
9310             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9311             struct timespec ts, *ts_ptr;
9312 
9313             /*
9314              * The 6th arg is actually two args smashed together,
9315              * so we cannot use the C library.
9316              */
9317             sigset_t set;
9318             struct {
9319                 sigset_t *set;
9320                 size_t size;
9321             } sig, *sig_ptr;
9322 
9323             abi_ulong arg_sigset, arg_sigsize, *arg7;
9324             target_sigset_t *target_sigset;
9325 
9326             n = arg1;
9327             rfd_addr = arg2;
9328             wfd_addr = arg3;
9329             efd_addr = arg4;
9330             ts_addr = arg5;
9331 
9332             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9333             if (ret) {
9334                 goto fail;
9335             }
9336             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9337             if (ret) {
9338                 goto fail;
9339             }
9340             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9341             if (ret) {
9342                 goto fail;
9343             }
9344 
9345             /*
9346              * This takes a timespec, and not a timeval, so we cannot
9347              * use the do_select() helper ...
9348              */
9349             if (ts_addr) {
9350                 if (target_to_host_timespec(&ts, ts_addr)) {
9351                     goto efault;
9352                 }
9353                 ts_ptr = &ts;
9354             } else {
9355                 ts_ptr = NULL;
9356             }
9357 
9358             /* Extract the two packed args for the sigset */
9359             if (arg6) {
9360                 sig_ptr = &sig;
9361                 sig.size = SIGSET_T_SIZE;
9362 
9363                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9364                 if (!arg7) {
9365                     goto efault;
9366                 }
9367                 arg_sigset = tswapal(arg7[0]);
9368                 arg_sigsize = tswapal(arg7[1]);
9369                 unlock_user(arg7, arg6, 0);
9370 
9371                 if (arg_sigset) {
9372                     sig.set = &set;
9373                     if (arg_sigsize != sizeof(*target_sigset)) {
9374                         /* Like the kernel, we enforce correct size sigsets */
9375                         ret = -TARGET_EINVAL;
9376                         goto fail;
9377                     }
9378                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9379                                               sizeof(*target_sigset), 1);
9380                     if (!target_sigset) {
9381                         goto efault;
9382                     }
9383                     target_to_host_sigset(&set, target_sigset);
9384                     unlock_user(target_sigset, arg_sigset, 0);
9385                 } else {
9386                     sig.set = NULL;
9387                 }
9388             } else {
9389                 sig_ptr = NULL;
9390             }
9391 
9392             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9393                                           ts_ptr, sig_ptr));
9394 
9395             if (!is_error(ret)) {
9396                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9397                     goto efault;
9398                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9399                     goto efault;
9400                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9401                     goto efault;
9402 
9403                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9404                     goto efault;
9405             }
9406         }
9407         break;
9408 #endif
9409 #ifdef TARGET_NR_symlink
9410     case TARGET_NR_symlink:
9411         {
9412             void *p2;
9413             p = lock_user_string(arg1);
9414             p2 = lock_user_string(arg2);
9415             if (!p || !p2)
9416                 ret = -TARGET_EFAULT;
9417             else
9418                 ret = get_errno(symlink(p, p2));
9419             unlock_user(p2, arg2, 0);
9420             unlock_user(p, arg1, 0);
9421         }
9422         break;
9423 #endif
9424 #if defined(TARGET_NR_symlinkat)
9425     case TARGET_NR_symlinkat:
9426         {
9427             void *p2;
9428             p  = lock_user_string(arg1);
9429             p2 = lock_user_string(arg3);
9430             if (!p || !p2)
9431                 ret = -TARGET_EFAULT;
9432             else
9433                 ret = get_errno(symlinkat(p, arg2, p2));
9434             unlock_user(p2, arg3, 0);
9435             unlock_user(p, arg1, 0);
9436         }
9437         break;
9438 #endif
9439 #ifdef TARGET_NR_oldlstat
9440     case TARGET_NR_oldlstat:
9441         goto unimplemented;
9442 #endif
9443 #ifdef TARGET_NR_readlink
9444     case TARGET_NR_readlink:
9445         {
9446             void *p2;
9447             p = lock_user_string(arg1);
9448             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9449             if (!p || !p2) {
9450                 ret = -TARGET_EFAULT;
9451             } else if (!arg3) {
9452                 /* Short circuit this for the magic exe check. */
9453                 ret = -TARGET_EINVAL;
9454             } else if (is_proc_myself((const char *)p, "exe")) {
9455                 char real[PATH_MAX], *temp;
9456                 temp = realpath(exec_path, real);
9457                 /* Return value is # of bytes that we wrote to the buffer. */
9458                 if (temp == NULL) {
9459                     ret = get_errno(-1);
9460                 } else {
9461                     /* Don't worry about sign mismatch as earlier mapping
9462                      * logic would have thrown a bad address error. */
9463                     ret = MIN(strlen(real), arg3);
9464                     /* We cannot NUL terminate the string. */
9465                     memcpy(p2, real, ret);
9466                 }
9467             } else {
9468                 ret = get_errno(readlink(path(p), p2, arg3));
9469             }
9470             unlock_user(p2, arg2, ret);
9471             unlock_user(p, arg1, 0);
9472         }
9473         break;
9474 #endif
9475 #if defined(TARGET_NR_readlinkat)
9476     case TARGET_NR_readlinkat:
9477         {
9478             void *p2;
9479             p  = lock_user_string(arg2);
9480             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9481             if (!p || !p2) {
9482                 ret = -TARGET_EFAULT;
9483             } else if (is_proc_myself((const char *)p, "exe")) {
9484                 char real[PATH_MAX], *temp;
9485                 temp = realpath(exec_path, real);
9486                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9487                 snprintf((char *)p2, arg4, "%s", real);
9488             } else {
9489                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9490             }
9491             unlock_user(p2, arg3, ret);
9492             unlock_user(p, arg2, 0);
9493         }
9494         break;
9495 #endif
9496 #ifdef TARGET_NR_uselib
9497     case TARGET_NR_uselib:
9498         goto unimplemented;
9499 #endif
9500 #ifdef TARGET_NR_swapon
9501     case TARGET_NR_swapon:
9502         if (!(p = lock_user_string(arg1)))
9503             goto efault;
9504         ret = get_errno(swapon(p, arg2));
9505         unlock_user(p, arg1, 0);
9506         break;
9507 #endif
9508     case TARGET_NR_reboot:
9509         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9510            /* arg4 must be ignored in all other cases */
9511            p = lock_user_string(arg4);
9512            if (!p) {
9513               goto efault;
9514            }
9515            ret = get_errno(reboot(arg1, arg2, arg3, p));
9516            unlock_user(p, arg4, 0);
9517         } else {
9518            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9519         }
9520         break;
9521 #ifdef TARGET_NR_readdir
9522     case TARGET_NR_readdir:
9523         goto unimplemented;
9524 #endif
9525 #ifdef TARGET_NR_mmap
9526     case TARGET_NR_mmap:
9527 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9528     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9529     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9530     || defined(TARGET_S390X)
9531         {
9532             abi_ulong *v;
9533             abi_ulong v1, v2, v3, v4, v5, v6;
9534             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9535                 goto efault;
9536             v1 = tswapal(v[0]);
9537             v2 = tswapal(v[1]);
9538             v3 = tswapal(v[2]);
9539             v4 = tswapal(v[3]);
9540             v5 = tswapal(v[4]);
9541             v6 = tswapal(v[5]);
9542             unlock_user(v, arg1, 0);
9543             ret = get_errno(target_mmap(v1, v2, v3,
9544                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9545                                         v5, v6));
9546         }
9547 #else
9548         ret = get_errno(target_mmap(arg1, arg2, arg3,
9549                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9550                                     arg5,
9551                                     arg6));
9552 #endif
9553         break;
9554 #endif
9555 #ifdef TARGET_NR_mmap2
9556     case TARGET_NR_mmap2:
9557 #ifndef MMAP_SHIFT
9558 #define MMAP_SHIFT 12
9559 #endif
9560         ret = get_errno(target_mmap(arg1, arg2, arg3,
9561                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9562                                     arg5,
9563                                     arg6 << MMAP_SHIFT));
9564         break;
9565 #endif
9566     case TARGET_NR_munmap:
9567         ret = get_errno(target_munmap(arg1, arg2));
9568         break;
9569     case TARGET_NR_mprotect:
9570         {
9571             TaskState *ts = cpu->opaque;
9572             /* Special hack to detect libc making the stack executable.  */
9573             if ((arg3 & PROT_GROWSDOWN)
9574                 && arg1 >= ts->info->stack_limit
9575                 && arg1 <= ts->info->start_stack) {
9576                 arg3 &= ~PROT_GROWSDOWN;
9577                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9578                 arg1 = ts->info->stack_limit;
9579             }
9580         }
9581         ret = get_errno(target_mprotect(arg1, arg2, arg3));
9582         break;
9583 #ifdef TARGET_NR_mremap
9584     case TARGET_NR_mremap:
9585         ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9586         break;
9587 #endif
9588         /* ??? msync/mlock/munlock are broken for softmmu.  */
9589 #ifdef TARGET_NR_msync
9590     case TARGET_NR_msync:
9591         ret = get_errno(msync(g2h(arg1), arg2, arg3));
9592         break;
9593 #endif
9594 #ifdef TARGET_NR_mlock
9595     case TARGET_NR_mlock:
9596         ret = get_errno(mlock(g2h(arg1), arg2));
9597         break;
9598 #endif
9599 #ifdef TARGET_NR_munlock
9600     case TARGET_NR_munlock:
9601         ret = get_errno(munlock(g2h(arg1), arg2));
9602         break;
9603 #endif
9604 #ifdef TARGET_NR_mlockall
9605     case TARGET_NR_mlockall:
9606         ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9607         break;
9608 #endif
9609 #ifdef TARGET_NR_munlockall
9610     case TARGET_NR_munlockall:
9611         ret = get_errno(munlockall());
9612         break;
9613 #endif
9614     case TARGET_NR_truncate:
9615         if (!(p = lock_user_string(arg1)))
9616             goto efault;
9617         ret = get_errno(truncate(p, arg2));
9618         unlock_user(p, arg1, 0);
9619         break;
9620     case TARGET_NR_ftruncate:
9621         ret = get_errno(ftruncate(arg1, arg2));
9622         break;
9623     case TARGET_NR_fchmod:
9624         ret = get_errno(fchmod(arg1, arg2));
9625         break;
9626 #if defined(TARGET_NR_fchmodat)
9627     case TARGET_NR_fchmodat:
9628         if (!(p = lock_user_string(arg2)))
9629             goto efault;
9630         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9631         unlock_user(p, arg2, 0);
9632         break;
9633 #endif
9634     case TARGET_NR_getpriority:
9635         /* Note that negative values are valid for getpriority, so we must
9636            differentiate based on errno settings.  */
9637         errno = 0;
9638         ret = getpriority(arg1, arg2);
9639         if (ret == -1 && errno != 0) {
9640             ret = -host_to_target_errno(errno);
9641             break;
9642         }
9643 #ifdef TARGET_ALPHA
9644         /* Return value is the unbiased priority.  Signal no error.  */
9645         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9646 #else
9647         /* Return value is a biased priority to avoid negative numbers.  */
9648         ret = 20 - ret;
9649 #endif
9650         break;
9651     case TARGET_NR_setpriority:
9652         ret = get_errno(setpriority(arg1, arg2, arg3));
9653         break;
9654 #ifdef TARGET_NR_profil
9655     case TARGET_NR_profil:
9656         goto unimplemented;
9657 #endif
9658     case TARGET_NR_statfs:
9659         if (!(p = lock_user_string(arg1)))
9660             goto efault;
9661         ret = get_errno(statfs(path(p), &stfs));
9662         unlock_user(p, arg1, 0);
9663     convert_statfs:
9664         if (!is_error(ret)) {
9665             struct target_statfs *target_stfs;
9666 
9667             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9668                 goto efault;
9669             __put_user(stfs.f_type, &target_stfs->f_type);
9670             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9671             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9672             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9673             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9674             __put_user(stfs.f_files, &target_stfs->f_files);
9675             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9676             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9677             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9678             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9679             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9680 #ifdef _STATFS_F_FLAGS
9681             __put_user(stfs.f_flags, &target_stfs->f_flags);
9682 #else
9683             __put_user(0, &target_stfs->f_flags);
9684 #endif
9685             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9686             unlock_user_struct(target_stfs, arg2, 1);
9687         }
9688         break;
9689     case TARGET_NR_fstatfs:
9690         ret = get_errno(fstatfs(arg1, &stfs));
9691         goto convert_statfs;
9692 #ifdef TARGET_NR_statfs64
9693     case TARGET_NR_statfs64:
9694         if (!(p = lock_user_string(arg1)))
9695             goto efault;
9696         ret = get_errno(statfs(path(p), &stfs));
9697         unlock_user(p, arg1, 0);
9698     convert_statfs64:
9699         if (!is_error(ret)) {
9700             struct target_statfs64 *target_stfs;
9701 
9702             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9703                 goto efault;
9704             __put_user(stfs.f_type, &target_stfs->f_type);
9705             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9706             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9707             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9708             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9709             __put_user(stfs.f_files, &target_stfs->f_files);
9710             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9711             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9712             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9713             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9714             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9715             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9716             unlock_user_struct(target_stfs, arg3, 1);
9717         }
9718         break;
9719     case TARGET_NR_fstatfs64:
9720         ret = get_errno(fstatfs(arg1, &stfs));
9721         goto convert_statfs64;
9722 #endif
9723 #ifdef TARGET_NR_ioperm
9724     case TARGET_NR_ioperm:
9725         goto unimplemented;
9726 #endif
9727 #ifdef TARGET_NR_socketcall
9728     case TARGET_NR_socketcall:
9729         ret = do_socketcall(arg1, arg2);
9730         break;
9731 #endif
9732 #ifdef TARGET_NR_accept
9733     case TARGET_NR_accept:
9734         ret = do_accept4(arg1, arg2, arg3, 0);
9735         break;
9736 #endif
9737 #ifdef TARGET_NR_accept4
9738     case TARGET_NR_accept4:
9739         ret = do_accept4(arg1, arg2, arg3, arg4);
9740         break;
9741 #endif
9742 #ifdef TARGET_NR_bind
9743     case TARGET_NR_bind:
9744         ret = do_bind(arg1, arg2, arg3);
9745         break;
9746 #endif
9747 #ifdef TARGET_NR_connect
9748     case TARGET_NR_connect:
9749         ret = do_connect(arg1, arg2, arg3);
9750         break;
9751 #endif
9752 #ifdef TARGET_NR_getpeername
9753     case TARGET_NR_getpeername:
9754         ret = do_getpeername(arg1, arg2, arg3);
9755         break;
9756 #endif
9757 #ifdef TARGET_NR_getsockname
9758     case TARGET_NR_getsockname:
9759         ret = do_getsockname(arg1, arg2, arg3);
9760         break;
9761 #endif
9762 #ifdef TARGET_NR_getsockopt
9763     case TARGET_NR_getsockopt:
9764         ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9765         break;
9766 #endif
9767 #ifdef TARGET_NR_listen
9768     case TARGET_NR_listen:
9769         ret = get_errno(listen(arg1, arg2));
9770         break;
9771 #endif
9772 #ifdef TARGET_NR_recv
9773     case TARGET_NR_recv:
9774         ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9775         break;
9776 #endif
9777 #ifdef TARGET_NR_recvfrom
9778     case TARGET_NR_recvfrom:
9779         ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9780         break;
9781 #endif
9782 #ifdef TARGET_NR_recvmsg
9783     case TARGET_NR_recvmsg:
9784         ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9785         break;
9786 #endif
9787 #ifdef TARGET_NR_send
9788     case TARGET_NR_send:
9789         ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9790         break;
9791 #endif
9792 #ifdef TARGET_NR_sendmsg
9793     case TARGET_NR_sendmsg:
9794         ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9795         break;
9796 #endif
9797 #ifdef TARGET_NR_sendmmsg
9798     case TARGET_NR_sendmmsg:
9799         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9800         break;
9801     case TARGET_NR_recvmmsg:
9802         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9803         break;
9804 #endif
9805 #ifdef TARGET_NR_sendto
9806     case TARGET_NR_sendto:
9807         ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9808         break;
9809 #endif
9810 #ifdef TARGET_NR_shutdown
9811     case TARGET_NR_shutdown:
9812         ret = get_errno(shutdown(arg1, arg2));
9813         break;
9814 #endif
9815 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9816     case TARGET_NR_getrandom:
9817         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9818         if (!p) {
9819             goto efault;
9820         }
9821         ret = get_errno(getrandom(p, arg2, arg3));
9822         unlock_user(p, arg1, ret);
9823         break;
9824 #endif
9825 #ifdef TARGET_NR_socket
9826     case TARGET_NR_socket:
9827         ret = do_socket(arg1, arg2, arg3);
9828         break;
9829 #endif
9830 #ifdef TARGET_NR_socketpair
9831     case TARGET_NR_socketpair:
9832         ret = do_socketpair(arg1, arg2, arg3, arg4);
9833         break;
9834 #endif
9835 #ifdef TARGET_NR_setsockopt
9836     case TARGET_NR_setsockopt:
9837         ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9838         break;
9839 #endif
9840 #if defined(TARGET_NR_syslog)
9841     case TARGET_NR_syslog:
9842         {
9843             int len = arg2;
9844 
9845             switch (arg1) {
9846             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9847             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9848             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9849             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9850             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9851             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9852             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9853             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9854                 {
9855                     ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9856                 }
9857                 break;
9858             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9859             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9860             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9861                 {
9862                     ret = -TARGET_EINVAL;
9863                     if (len < 0) {
9864                         goto fail;
9865                     }
9866                     ret = 0;
9867                     if (len == 0) {
9868                         break;
9869                     }
9870                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9871                     if (!p) {
9872                         ret = -TARGET_EFAULT;
9873                         goto fail;
9874                     }
9875                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9876                     unlock_user(p, arg2, arg3);
9877                 }
9878                 break;
9879             default:
9880                 ret = -EINVAL;
9881                 break;
9882             }
9883         }
9884         break;
9885 #endif
9886     case TARGET_NR_setitimer:
9887         {
9888             struct itimerval value, ovalue, *pvalue;
9889 
9890             if (arg2) {
9891                 pvalue = &value;
9892                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9893                     || copy_from_user_timeval(&pvalue->it_value,
9894                                               arg2 + sizeof(struct target_timeval)))
9895                     goto efault;
9896             } else {
9897                 pvalue = NULL;
9898             }
9899             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9900             if (!is_error(ret) && arg3) {
9901                 if (copy_to_user_timeval(arg3,
9902                                          &ovalue.it_interval)
9903                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9904                                             &ovalue.it_value))
9905                     goto efault;
9906             }
9907         }
9908         break;
9909     case TARGET_NR_getitimer:
9910         {
9911             struct itimerval value;
9912 
9913             ret = get_errno(getitimer(arg1, &value));
9914             if (!is_error(ret) && arg2) {
9915                 if (copy_to_user_timeval(arg2,
9916                                          &value.it_interval)
9917                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9918                                             &value.it_value))
9919                     goto efault;
9920             }
9921         }
9922         break;
9923 #ifdef TARGET_NR_stat
9924     case TARGET_NR_stat:
9925         if (!(p = lock_user_string(arg1)))
9926             goto efault;
9927         ret = get_errno(stat(path(p), &st));
9928         unlock_user(p, arg1, 0);
9929         goto do_stat;
9930 #endif
9931 #ifdef TARGET_NR_lstat
9932     case TARGET_NR_lstat:
9933         if (!(p = lock_user_string(arg1)))
9934             goto efault;
9935         ret = get_errno(lstat(path(p), &st));
9936         unlock_user(p, arg1, 0);
9937         goto do_stat;
9938 #endif
9939     case TARGET_NR_fstat:
9940         {
9941             ret = get_errno(fstat(arg1, &st));
9942 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9943         do_stat:
9944 #endif
9945             if (!is_error(ret)) {
9946                 struct target_stat *target_st;
9947 
9948                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9949                     goto efault;
9950                 memset(target_st, 0, sizeof(*target_st));
9951                 __put_user(st.st_dev, &target_st->st_dev);
9952                 __put_user(st.st_ino, &target_st->st_ino);
9953                 __put_user(st.st_mode, &target_st->st_mode);
9954                 __put_user(st.st_uid, &target_st->st_uid);
9955                 __put_user(st.st_gid, &target_st->st_gid);
9956                 __put_user(st.st_nlink, &target_st->st_nlink);
9957                 __put_user(st.st_rdev, &target_st->st_rdev);
9958                 __put_user(st.st_size, &target_st->st_size);
9959                 __put_user(st.st_blksize, &target_st->st_blksize);
9960                 __put_user(st.st_blocks, &target_st->st_blocks);
9961                 __put_user(st.st_atime, &target_st->target_st_atime);
9962                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9963                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9964                 unlock_user_struct(target_st, arg2, 1);
9965             }
9966         }
9967         break;
9968 #ifdef TARGET_NR_olduname
9969     case TARGET_NR_olduname:
9970         goto unimplemented;
9971 #endif
9972 #ifdef TARGET_NR_iopl
9973     case TARGET_NR_iopl:
9974         goto unimplemented;
9975 #endif
9976     case TARGET_NR_vhangup:
9977         ret = get_errno(vhangup());
9978         break;
9979 #ifdef TARGET_NR_idle
9980     case TARGET_NR_idle:
9981         goto unimplemented;
9982 #endif
9983 #ifdef TARGET_NR_syscall
9984     case TARGET_NR_syscall:
9985         ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9986                          arg6, arg7, arg8, 0);
9987         break;
9988 #endif
9989     case TARGET_NR_wait4:
9990         {
9991             int status;
9992             abi_long status_ptr = arg2;
9993             struct rusage rusage, *rusage_ptr;
9994             abi_ulong target_rusage = arg4;
9995             abi_long rusage_err;
9996             if (target_rusage)
9997                 rusage_ptr = &rusage;
9998             else
9999                 rusage_ptr = NULL;
10000             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10001             if (!is_error(ret)) {
10002                 if (status_ptr && ret) {
10003                     status = host_to_target_waitstatus(status);
10004                     if (put_user_s32(status, status_ptr))
10005                         goto efault;
10006                 }
10007                 if (target_rusage) {
10008                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10009                     if (rusage_err) {
10010                         ret = rusage_err;
10011                     }
10012                 }
10013             }
10014         }
10015         break;
10016 #ifdef TARGET_NR_swapoff
10017     case TARGET_NR_swapoff:
10018         if (!(p = lock_user_string(arg1)))
10019             goto efault;
10020         ret = get_errno(swapoff(p));
10021         unlock_user(p, arg1, 0);
10022         break;
10023 #endif
10024     case TARGET_NR_sysinfo:
10025         {
10026             struct target_sysinfo *target_value;
10027             struct sysinfo value;
10028             ret = get_errno(sysinfo(&value));
10029             if (!is_error(ret) && arg1)
10030             {
10031                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10032                     goto efault;
10033                 __put_user(value.uptime, &target_value->uptime);
10034                 __put_user(value.loads[0], &target_value->loads[0]);
10035                 __put_user(value.loads[1], &target_value->loads[1]);
10036                 __put_user(value.loads[2], &target_value->loads[2]);
10037                 __put_user(value.totalram, &target_value->totalram);
10038                 __put_user(value.freeram, &target_value->freeram);
10039                 __put_user(value.sharedram, &target_value->sharedram);
10040                 __put_user(value.bufferram, &target_value->bufferram);
10041                 __put_user(value.totalswap, &target_value->totalswap);
10042                 __put_user(value.freeswap, &target_value->freeswap);
10043                 __put_user(value.procs, &target_value->procs);
10044                 __put_user(value.totalhigh, &target_value->totalhigh);
10045                 __put_user(value.freehigh, &target_value->freehigh);
10046                 __put_user(value.mem_unit, &target_value->mem_unit);
10047                 unlock_user_struct(target_value, arg1, 1);
10048             }
10049         }
10050         break;
10051 #ifdef TARGET_NR_ipc
10052     case TARGET_NR_ipc:
10053         ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10054         break;
10055 #endif
10056 #ifdef TARGET_NR_semget
10057     case TARGET_NR_semget:
10058         ret = get_errno(semget(arg1, arg2, arg3));
10059         break;
10060 #endif
10061 #ifdef TARGET_NR_semop
10062     case TARGET_NR_semop:
10063         ret = do_semop(arg1, arg2, arg3);
10064         break;
10065 #endif
10066 #ifdef TARGET_NR_semctl
10067     case TARGET_NR_semctl:
10068         ret = do_semctl(arg1, arg2, arg3, arg4);
10069         break;
10070 #endif
10071 #ifdef TARGET_NR_msgctl
10072     case TARGET_NR_msgctl:
10073         ret = do_msgctl(arg1, arg2, arg3);
10074         break;
10075 #endif
10076 #ifdef TARGET_NR_msgget
10077     case TARGET_NR_msgget:
10078         ret = get_errno(msgget(arg1, arg2));
10079         break;
10080 #endif
10081 #ifdef TARGET_NR_msgrcv
10082     case TARGET_NR_msgrcv:
10083         ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10084         break;
10085 #endif
10086 #ifdef TARGET_NR_msgsnd
10087     case TARGET_NR_msgsnd:
10088         ret = do_msgsnd(arg1, arg2, arg3, arg4);
10089         break;
10090 #endif
10091 #ifdef TARGET_NR_shmget
10092     case TARGET_NR_shmget:
10093         ret = get_errno(shmget(arg1, arg2, arg3));
10094         break;
10095 #endif
10096 #ifdef TARGET_NR_shmctl
10097     case TARGET_NR_shmctl:
10098         ret = do_shmctl(arg1, arg2, arg3);
10099         break;
10100 #endif
10101 #ifdef TARGET_NR_shmat
10102     case TARGET_NR_shmat:
10103         ret = do_shmat(cpu_env, arg1, arg2, arg3);
10104         break;
10105 #endif
10106 #ifdef TARGET_NR_shmdt
10107     case TARGET_NR_shmdt:
10108         ret = do_shmdt(arg1);
10109         break;
10110 #endif
10111     case TARGET_NR_fsync:
10112         ret = get_errno(fsync(arg1));
10113         break;
10114     case TARGET_NR_clone:
10115         /* Linux manages to have three different orderings for its
10116          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10117          * match the kernel's CONFIG_CLONE_* settings.
10118          * Microblaze is further special in that it uses a sixth
10119          * implicit argument to clone for the TLS pointer.
10120          */
10121 #if defined(TARGET_MICROBLAZE)
10122         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10123 #elif defined(TARGET_CLONE_BACKWARDS)
10124         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10125 #elif defined(TARGET_CLONE_BACKWARDS2)
10126         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10127 #else
10128         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10129 #endif
10130         break;
10131 #ifdef __NR_exit_group
10132         /* new thread calls */
10133     case TARGET_NR_exit_group:
10134 #ifdef TARGET_GPROF
10135         _mcleanup();
10136 #endif
10137         gdb_exit(cpu_env, arg1);
10138         ret = get_errno(exit_group(arg1));
10139         break;
10140 #endif
10141     case TARGET_NR_setdomainname:
10142         if (!(p = lock_user_string(arg1)))
10143             goto efault;
10144         ret = get_errno(setdomainname(p, arg2));
10145         unlock_user(p, arg1, 0);
10146         break;
10147     case TARGET_NR_uname:
10148         /* no need to transcode because we use the linux syscall */
10149         {
10150             struct new_utsname * buf;
10151 
10152             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10153                 goto efault;
10154             ret = get_errno(sys_uname(buf));
10155             if (!is_error(ret)) {
10156                 /* Overwrite the native machine name with whatever is being
10157                    emulated. */
10158                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10159                           sizeof(buf->machine));
10160                 /* Allow the user to override the reported release.  */
10161                 if (qemu_uname_release && *qemu_uname_release) {
10162                     g_strlcpy(buf->release, qemu_uname_release,
10163                               sizeof(buf->release));
10164                 }
10165             }
10166             unlock_user_struct(buf, arg1, 1);
10167         }
10168         break;
10169 #ifdef TARGET_I386
10170     case TARGET_NR_modify_ldt:
10171         ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10172         break;
10173 #if !defined(TARGET_X86_64)
10174     case TARGET_NR_vm86old:
10175         goto unimplemented;
10176     case TARGET_NR_vm86:
10177         ret = do_vm86(cpu_env, arg1, arg2);
10178         break;
10179 #endif
10180 #endif
10181     case TARGET_NR_adjtimex:
10182         {
10183             struct timex host_buf;
10184 
10185             if (target_to_host_timex(&host_buf, arg1) != 0) {
10186                 goto efault;
10187             }
10188             ret = get_errno(adjtimex(&host_buf));
10189             if (!is_error(ret)) {
10190                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10191                     goto efault;
10192                 }
10193             }
10194         }
10195         break;
10196 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10197     case TARGET_NR_clock_adjtime:
10198         {
10199             struct timex htx, *phtx = &htx;
10200 
10201             if (target_to_host_timex(phtx, arg2) != 0) {
10202                 goto efault;
10203             }
10204             ret = get_errno(clock_adjtime(arg1, phtx));
10205             if (!is_error(ret) && phtx) {
10206                 if (host_to_target_timex(arg2, phtx) != 0) {
10207                     goto efault;
10208                 }
10209             }
10210         }
10211         break;
10212 #endif
10213 #ifdef TARGET_NR_create_module
10214     case TARGET_NR_create_module:
10215 #endif
10216     case TARGET_NR_init_module:
10217     case TARGET_NR_delete_module:
10218 #ifdef TARGET_NR_get_kernel_syms
10219     case TARGET_NR_get_kernel_syms:
10220 #endif
10221         goto unimplemented;
10222     case TARGET_NR_quotactl:
10223         goto unimplemented;
10224     case TARGET_NR_getpgid:
10225         ret = get_errno(getpgid(arg1));
10226         break;
10227     case TARGET_NR_fchdir:
10228         ret = get_errno(fchdir(arg1));
10229         break;
10230 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10231     case TARGET_NR_bdflush:
10232         goto unimplemented;
10233 #endif
10234 #ifdef TARGET_NR_sysfs
10235     case TARGET_NR_sysfs:
10236         goto unimplemented;
10237 #endif
10238     case TARGET_NR_personality:
10239         ret = get_errno(personality(arg1));
10240         break;
10241 #ifdef TARGET_NR_afs_syscall
10242     case TARGET_NR_afs_syscall:
10243         goto unimplemented;
10244 #endif
10245 #ifdef TARGET_NR__llseek /* Not on alpha */
10246     case TARGET_NR__llseek:
10247         {
10248             int64_t res;
10249 #if !defined(__NR_llseek)
10250             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10251             if (res == -1) {
10252                 ret = get_errno(res);
10253             } else {
10254                 ret = 0;
10255             }
10256 #else
10257             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10258 #endif
10259             if ((ret == 0) && put_user_s64(res, arg4)) {
10260                 goto efault;
10261             }
10262         }
10263         break;
10264 #endif
10265 #ifdef TARGET_NR_getdents
10266     case TARGET_NR_getdents:
10267 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10268 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10269         {
10270             struct target_dirent *target_dirp;
10271             struct linux_dirent *dirp;
10272             abi_long count = arg3;
10273 
10274             dirp = g_try_malloc(count);
10275             if (!dirp) {
10276                 ret = -TARGET_ENOMEM;
10277                 goto fail;
10278             }
10279 
10280             ret = get_errno(sys_getdents(arg1, dirp, count));
10281             if (!is_error(ret)) {
10282                 struct linux_dirent *de;
10283 		struct target_dirent *tde;
10284                 int len = ret;
10285                 int reclen, treclen;
10286 		int count1, tnamelen;
10287 
10288 		count1 = 0;
10289                 de = dirp;
10290                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10291                     goto efault;
10292 		tde = target_dirp;
10293                 while (len > 0) {
10294                     reclen = de->d_reclen;
10295                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10296                     assert(tnamelen >= 0);
10297                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10298                     assert(count1 + treclen <= count);
10299                     tde->d_reclen = tswap16(treclen);
10300                     tde->d_ino = tswapal(de->d_ino);
10301                     tde->d_off = tswapal(de->d_off);
10302                     memcpy(tde->d_name, de->d_name, tnamelen);
10303                     de = (struct linux_dirent *)((char *)de + reclen);
10304                     len -= reclen;
10305                     tde = (struct target_dirent *)((char *)tde + treclen);
10306 		    count1 += treclen;
10307                 }
10308 		ret = count1;
10309                 unlock_user(target_dirp, arg2, ret);
10310             }
10311             g_free(dirp);
10312         }
10313 #else
10314         {
10315             struct linux_dirent *dirp;
10316             abi_long count = arg3;
10317 
10318             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10319                 goto efault;
10320             ret = get_errno(sys_getdents(arg1, dirp, count));
10321             if (!is_error(ret)) {
10322                 struct linux_dirent *de;
10323                 int len = ret;
10324                 int reclen;
10325                 de = dirp;
10326                 while (len > 0) {
10327                     reclen = de->d_reclen;
10328                     if (reclen > len)
10329                         break;
10330                     de->d_reclen = tswap16(reclen);
10331                     tswapls(&de->d_ino);
10332                     tswapls(&de->d_off);
10333                     de = (struct linux_dirent *)((char *)de + reclen);
10334                     len -= reclen;
10335                 }
10336             }
10337             unlock_user(dirp, arg2, ret);
10338         }
10339 #endif
10340 #else
10341         /* Implement getdents in terms of getdents64 */
10342         {
10343             struct linux_dirent64 *dirp;
10344             abi_long count = arg3;
10345 
10346             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10347             if (!dirp) {
10348                 goto efault;
10349             }
10350             ret = get_errno(sys_getdents64(arg1, dirp, count));
10351             if (!is_error(ret)) {
10352                 /* Convert the dirent64 structs to target dirent.  We do this
10353                  * in-place, since we can guarantee that a target_dirent is no
10354                  * larger than a dirent64; however this means we have to be
10355                  * careful to read everything before writing in the new format.
10356                  */
10357                 struct linux_dirent64 *de;
10358                 struct target_dirent *tde;
10359                 int len = ret;
10360                 int tlen = 0;
10361 
10362                 de = dirp;
10363                 tde = (struct target_dirent *)dirp;
10364                 while (len > 0) {
10365                     int namelen, treclen;
10366                     int reclen = de->d_reclen;
10367                     uint64_t ino = de->d_ino;
10368                     int64_t off = de->d_off;
10369                     uint8_t type = de->d_type;
10370 
10371                     namelen = strlen(de->d_name);
10372                     treclen = offsetof(struct target_dirent, d_name)
10373                         + namelen + 2;
10374                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10375 
10376                     memmove(tde->d_name, de->d_name, namelen + 1);
10377                     tde->d_ino = tswapal(ino);
10378                     tde->d_off = tswapal(off);
10379                     tde->d_reclen = tswap16(treclen);
10380                     /* The target_dirent type is in what was formerly a padding
10381                      * byte at the end of the structure:
10382                      */
10383                     *(((char *)tde) + treclen - 1) = type;
10384 
10385                     de = (struct linux_dirent64 *)((char *)de + reclen);
10386                     tde = (struct target_dirent *)((char *)tde + treclen);
10387                     len -= reclen;
10388                     tlen += treclen;
10389                 }
10390                 ret = tlen;
10391             }
10392             unlock_user(dirp, arg2, ret);
10393         }
10394 #endif
10395         break;
10396 #endif /* TARGET_NR_getdents */
10397 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10398     case TARGET_NR_getdents64:
10399         {
10400             struct linux_dirent64 *dirp;
10401             abi_long count = arg3;
10402             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10403                 goto efault;
10404             ret = get_errno(sys_getdents64(arg1, dirp, count));
10405             if (!is_error(ret)) {
10406                 struct linux_dirent64 *de;
10407                 int len = ret;
10408                 int reclen;
10409                 de = dirp;
10410                 while (len > 0) {
10411                     reclen = de->d_reclen;
10412                     if (reclen > len)
10413                         break;
10414                     de->d_reclen = tswap16(reclen);
10415                     tswap64s((uint64_t *)&de->d_ino);
10416                     tswap64s((uint64_t *)&de->d_off);
10417                     de = (struct linux_dirent64 *)((char *)de + reclen);
10418                     len -= reclen;
10419                 }
10420             }
10421             unlock_user(dirp, arg2, ret);
10422         }
10423         break;
10424 #endif /* TARGET_NR_getdents64 */
10425 #if defined(TARGET_NR__newselect)
10426     case TARGET_NR__newselect:
10427         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10428         break;
10429 #endif
10430 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10431 # ifdef TARGET_NR_poll
10432     case TARGET_NR_poll:
10433 # endif
10434 # ifdef TARGET_NR_ppoll
10435     case TARGET_NR_ppoll:
10436 # endif
10437         {
10438             struct target_pollfd *target_pfd;
10439             unsigned int nfds = arg2;
10440             struct pollfd *pfd;
10441             unsigned int i;
10442 
10443             pfd = NULL;
10444             target_pfd = NULL;
10445             if (nfds) {
10446                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10447                     ret = -TARGET_EINVAL;
10448                     break;
10449                 }
10450 
10451                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10452                                        sizeof(struct target_pollfd) * nfds, 1);
10453                 if (!target_pfd) {
10454                     goto efault;
10455                 }
10456 
10457                 pfd = alloca(sizeof(struct pollfd) * nfds);
10458                 for (i = 0; i < nfds; i++) {
10459                     pfd[i].fd = tswap32(target_pfd[i].fd);
10460                     pfd[i].events = tswap16(target_pfd[i].events);
10461                 }
10462             }
10463 
10464             switch (num) {
10465 # ifdef TARGET_NR_ppoll
10466             case TARGET_NR_ppoll:
10467             {
10468                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10469                 target_sigset_t *target_set;
10470                 sigset_t _set, *set = &_set;
10471 
10472                 if (arg3) {
10473                     if (target_to_host_timespec(timeout_ts, arg3)) {
10474                         unlock_user(target_pfd, arg1, 0);
10475                         goto efault;
10476                     }
10477                 } else {
10478                     timeout_ts = NULL;
10479                 }
10480 
10481                 if (arg4) {
10482                     if (arg5 != sizeof(target_sigset_t)) {
10483                         unlock_user(target_pfd, arg1, 0);
10484                         ret = -TARGET_EINVAL;
10485                         break;
10486                     }
10487 
10488                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10489                     if (!target_set) {
10490                         unlock_user(target_pfd, arg1, 0);
10491                         goto efault;
10492                     }
10493                     target_to_host_sigset(set, target_set);
10494                 } else {
10495                     set = NULL;
10496                 }
10497 
10498                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10499                                            set, SIGSET_T_SIZE));
10500 
10501                 if (!is_error(ret) && arg3) {
10502                     host_to_target_timespec(arg3, timeout_ts);
10503                 }
10504                 if (arg4) {
10505                     unlock_user(target_set, arg4, 0);
10506                 }
10507                 break;
10508             }
10509 # endif
10510 # ifdef TARGET_NR_poll
10511             case TARGET_NR_poll:
10512             {
10513                 struct timespec ts, *pts;
10514 
10515                 if (arg3 >= 0) {
10516                     /* Convert ms to secs, ns */
10517                     ts.tv_sec = arg3 / 1000;
10518                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10519                     pts = &ts;
10520                 } else {
10521                     /* -ve poll() timeout means "infinite" */
10522                     pts = NULL;
10523                 }
10524                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10525                 break;
10526             }
10527 # endif
10528             default:
10529                 g_assert_not_reached();
10530             }
10531 
10532             if (!is_error(ret)) {
10533                 for(i = 0; i < nfds; i++) {
10534                     target_pfd[i].revents = tswap16(pfd[i].revents);
10535                 }
10536             }
10537             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10538         }
10539         break;
10540 #endif
10541     case TARGET_NR_flock:
10542         /* NOTE: the flock constant seems to be the same for every
10543            Linux platform */
10544         ret = get_errno(safe_flock(arg1, arg2));
10545         break;
10546     case TARGET_NR_readv:
10547         {
10548             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10549             if (vec != NULL) {
10550                 ret = get_errno(safe_readv(arg1, vec, arg3));
10551                 unlock_iovec(vec, arg2, arg3, 1);
10552             } else {
10553                 ret = -host_to_target_errno(errno);
10554             }
10555         }
10556         break;
10557     case TARGET_NR_writev:
10558         {
10559             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10560             if (vec != NULL) {
10561                 ret = get_errno(safe_writev(arg1, vec, arg3));
10562                 unlock_iovec(vec, arg2, arg3, 0);
10563             } else {
10564                 ret = -host_to_target_errno(errno);
10565             }
10566         }
10567         break;
10568 #if defined(TARGET_NR_preadv)
10569     case TARGET_NR_preadv:
10570         {
10571             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10572             if (vec != NULL) {
10573                 unsigned long low, high;
10574 
10575                 target_to_host_low_high(arg4, arg5, &low, &high);
10576                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10577                 unlock_iovec(vec, arg2, arg3, 1);
10578             } else {
10579                 ret = -host_to_target_errno(errno);
10580            }
10581         }
10582         break;
10583 #endif
10584 #if defined(TARGET_NR_pwritev)
10585     case TARGET_NR_pwritev:
10586         {
10587             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10588             if (vec != NULL) {
10589                 unsigned long low, high;
10590 
10591                 target_to_host_low_high(arg4, arg5, &low, &high);
10592                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10593                 unlock_iovec(vec, arg2, arg3, 0);
10594             } else {
10595                 ret = -host_to_target_errno(errno);
10596            }
10597         }
10598         break;
10599 #endif
10600     case TARGET_NR_getsid:
10601         ret = get_errno(getsid(arg1));
10602         break;
10603 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10604     case TARGET_NR_fdatasync:
10605         ret = get_errno(fdatasync(arg1));
10606         break;
10607 #endif
10608 #ifdef TARGET_NR__sysctl
10609     case TARGET_NR__sysctl:
10610         /* We don't implement this, but ENOTDIR is always a safe
10611            return value. */
10612         ret = -TARGET_ENOTDIR;
10613         break;
10614 #endif
10615     case TARGET_NR_sched_getaffinity:
10616         {
10617             unsigned int mask_size;
10618             unsigned long *mask;
10619 
10620             /*
10621              * sched_getaffinity needs multiples of ulong, so need to take
10622              * care of mismatches between target ulong and host ulong sizes.
10623              */
10624             if (arg2 & (sizeof(abi_ulong) - 1)) {
10625                 ret = -TARGET_EINVAL;
10626                 break;
10627             }
10628             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10629 
10630             mask = alloca(mask_size);
10631             memset(mask, 0, mask_size);
10632             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10633 
10634             if (!is_error(ret)) {
10635                 if (ret > arg2) {
10636                     /* More data returned than the caller's buffer will fit.
10637                      * This only happens if sizeof(abi_long) < sizeof(long)
10638                      * and the caller passed us a buffer holding an odd number
10639                      * of abi_longs. If the host kernel is actually using the
10640                      * extra 4 bytes then fail EINVAL; otherwise we can just
10641                      * ignore them and only copy the interesting part.
10642                      */
10643                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10644                     if (numcpus > arg2 * 8) {
10645                         ret = -TARGET_EINVAL;
10646                         break;
10647                     }
10648                     ret = arg2;
10649                 }
10650 
10651                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10652                     goto efault;
10653                 }
10654             }
10655         }
10656         break;
10657     case TARGET_NR_sched_setaffinity:
10658         {
10659             unsigned int mask_size;
10660             unsigned long *mask;
10661 
10662             /*
10663              * sched_setaffinity needs multiples of ulong, so need to take
10664              * care of mismatches between target ulong and host ulong sizes.
10665              */
10666             if (arg2 & (sizeof(abi_ulong) - 1)) {
10667                 ret = -TARGET_EINVAL;
10668                 break;
10669             }
10670             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10671             mask = alloca(mask_size);
10672 
10673             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10674             if (ret) {
10675                 break;
10676             }
10677 
10678             ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10679         }
10680         break;
10681     case TARGET_NR_getcpu:
10682         {
10683             unsigned cpu, node;
10684             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10685                                        arg2 ? &node : NULL,
10686                                        NULL));
10687             if (is_error(ret)) {
10688                 goto fail;
10689             }
10690             if (arg1 && put_user_u32(cpu, arg1)) {
10691                 goto efault;
10692             }
10693             if (arg2 && put_user_u32(node, arg2)) {
10694                 goto efault;
10695             }
10696         }
10697         break;
10698     case TARGET_NR_sched_setparam:
10699         {
10700             struct sched_param *target_schp;
10701             struct sched_param schp;
10702 
10703             if (arg2 == 0) {
10704                 return -TARGET_EINVAL;
10705             }
10706             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10707                 goto efault;
10708             schp.sched_priority = tswap32(target_schp->sched_priority);
10709             unlock_user_struct(target_schp, arg2, 0);
10710             ret = get_errno(sched_setparam(arg1, &schp));
10711         }
10712         break;
10713     case TARGET_NR_sched_getparam:
10714         {
10715             struct sched_param *target_schp;
10716             struct sched_param schp;
10717 
10718             if (arg2 == 0) {
10719                 return -TARGET_EINVAL;
10720             }
10721             ret = get_errno(sched_getparam(arg1, &schp));
10722             if (!is_error(ret)) {
10723                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10724                     goto efault;
10725                 target_schp->sched_priority = tswap32(schp.sched_priority);
10726                 unlock_user_struct(target_schp, arg2, 1);
10727             }
10728         }
10729         break;
10730     case TARGET_NR_sched_setscheduler:
10731         {
10732             struct sched_param *target_schp;
10733             struct sched_param schp;
10734             if (arg3 == 0) {
10735                 return -TARGET_EINVAL;
10736             }
10737             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10738                 goto efault;
10739             schp.sched_priority = tswap32(target_schp->sched_priority);
10740             unlock_user_struct(target_schp, arg3, 0);
10741             ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10742         }
10743         break;
10744     case TARGET_NR_sched_getscheduler:
10745         ret = get_errno(sched_getscheduler(arg1));
10746         break;
10747     case TARGET_NR_sched_yield:
10748         ret = get_errno(sched_yield());
10749         break;
10750     case TARGET_NR_sched_get_priority_max:
10751         ret = get_errno(sched_get_priority_max(arg1));
10752         break;
10753     case TARGET_NR_sched_get_priority_min:
10754         ret = get_errno(sched_get_priority_min(arg1));
10755         break;
10756     case TARGET_NR_sched_rr_get_interval:
10757         {
10758             struct timespec ts;
10759             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10760             if (!is_error(ret)) {
10761                 ret = host_to_target_timespec(arg2, &ts);
10762             }
10763         }
10764         break;
10765     case TARGET_NR_nanosleep:
10766         {
10767             struct timespec req, rem;
10768             target_to_host_timespec(&req, arg1);
10769             ret = get_errno(safe_nanosleep(&req, &rem));
10770             if (is_error(ret) && arg2) {
10771                 host_to_target_timespec(arg2, &rem);
10772             }
10773         }
10774         break;
10775 #ifdef TARGET_NR_query_module
10776     case TARGET_NR_query_module:
10777         goto unimplemented;
10778 #endif
10779 #ifdef TARGET_NR_nfsservctl
10780     case TARGET_NR_nfsservctl:
10781         goto unimplemented;
10782 #endif
10783     case TARGET_NR_prctl:
10784         switch (arg1) {
10785         case PR_GET_PDEATHSIG:
10786         {
10787             int deathsig;
10788             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10789             if (!is_error(ret) && arg2
10790                 && put_user_ual(deathsig, arg2)) {
10791                 goto efault;
10792             }
10793             break;
10794         }
10795 #ifdef PR_GET_NAME
10796         case PR_GET_NAME:
10797         {
10798             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10799             if (!name) {
10800                 goto efault;
10801             }
10802             ret = get_errno(prctl(arg1, (unsigned long)name,
10803                                   arg3, arg4, arg5));
10804             unlock_user(name, arg2, 16);
10805             break;
10806         }
10807         case PR_SET_NAME:
10808         {
10809             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10810             if (!name) {
10811                 goto efault;
10812             }
10813             ret = get_errno(prctl(arg1, (unsigned long)name,
10814                                   arg3, arg4, arg5));
10815             unlock_user(name, arg2, 0);
10816             break;
10817         }
10818 #endif
10819 #ifdef TARGET_AARCH64
10820         case TARGET_PR_SVE_SET_VL:
10821             /* We cannot support either PR_SVE_SET_VL_ONEXEC
10822                or PR_SVE_VL_INHERIT.  Therefore, anything above
10823                ARM_MAX_VQ results in EINVAL.  */
10824             ret = -TARGET_EINVAL;
10825             if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10826                 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10827                 CPUARMState *env = cpu_env;
10828                 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10829                 int vq = MAX(arg2 / 16, 1);
10830 
10831                 if (vq < old_vq) {
10832                     aarch64_sve_narrow_vq(env, vq);
10833                 }
10834                 env->vfp.zcr_el[1] = vq - 1;
10835                 ret = vq * 16;
10836             }
10837             break;
10838         case TARGET_PR_SVE_GET_VL:
10839             ret = -TARGET_EINVAL;
10840             if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10841                 CPUARMState *env = cpu_env;
10842                 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10843             }
10844             break;
10845 #endif /* AARCH64 */
10846         case PR_GET_SECCOMP:
10847         case PR_SET_SECCOMP:
10848             /* Disable seccomp to prevent the target disabling syscalls we
10849              * need. */
10850             ret = -TARGET_EINVAL;
10851             break;
10852         default:
10853             /* Most prctl options have no pointer arguments */
10854             ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10855             break;
10856         }
10857         break;
10858 #ifdef TARGET_NR_arch_prctl
10859     case TARGET_NR_arch_prctl:
10860 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10861         ret = do_arch_prctl(cpu_env, arg1, arg2);
10862         break;
10863 #else
10864         goto unimplemented;
10865 #endif
10866 #endif
10867 #ifdef TARGET_NR_pread64
10868     case TARGET_NR_pread64:
10869         if (regpairs_aligned(cpu_env, num)) {
10870             arg4 = arg5;
10871             arg5 = arg6;
10872         }
10873         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10874             goto efault;
10875         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10876         unlock_user(p, arg2, ret);
10877         break;
10878     case TARGET_NR_pwrite64:
10879         if (regpairs_aligned(cpu_env, num)) {
10880             arg4 = arg5;
10881             arg5 = arg6;
10882         }
10883         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10884             goto efault;
10885         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10886         unlock_user(p, arg2, 0);
10887         break;
10888 #endif
10889     case TARGET_NR_getcwd:
10890         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10891             goto efault;
10892         ret = get_errno(sys_getcwd1(p, arg2));
10893         unlock_user(p, arg1, ret);
10894         break;
10895     case TARGET_NR_capget:
10896     case TARGET_NR_capset:
10897     {
10898         struct target_user_cap_header *target_header;
10899         struct target_user_cap_data *target_data = NULL;
10900         struct __user_cap_header_struct header;
10901         struct __user_cap_data_struct data[2];
10902         struct __user_cap_data_struct *dataptr = NULL;
10903         int i, target_datalen;
10904         int data_items = 1;
10905 
10906         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10907             goto efault;
10908         }
10909         header.version = tswap32(target_header->version);
10910         header.pid = tswap32(target_header->pid);
10911 
10912         if (header.version != _LINUX_CAPABILITY_VERSION) {
10913             /* Version 2 and up takes pointer to two user_data structs */
10914             data_items = 2;
10915         }
10916 
10917         target_datalen = sizeof(*target_data) * data_items;
10918 
10919         if (arg2) {
10920             if (num == TARGET_NR_capget) {
10921                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10922             } else {
10923                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10924             }
10925             if (!target_data) {
10926                 unlock_user_struct(target_header, arg1, 0);
10927                 goto efault;
10928             }
10929 
10930             if (num == TARGET_NR_capset) {
10931                 for (i = 0; i < data_items; i++) {
10932                     data[i].effective = tswap32(target_data[i].effective);
10933                     data[i].permitted = tswap32(target_data[i].permitted);
10934                     data[i].inheritable = tswap32(target_data[i].inheritable);
10935                 }
10936             }
10937 
10938             dataptr = data;
10939         }
10940 
10941         if (num == TARGET_NR_capget) {
10942             ret = get_errno(capget(&header, dataptr));
10943         } else {
10944             ret = get_errno(capset(&header, dataptr));
10945         }
10946 
10947         /* The kernel always updates version for both capget and capset */
10948         target_header->version = tswap32(header.version);
10949         unlock_user_struct(target_header, arg1, 1);
10950 
10951         if (arg2) {
10952             if (num == TARGET_NR_capget) {
10953                 for (i = 0; i < data_items; i++) {
10954                     target_data[i].effective = tswap32(data[i].effective);
10955                     target_data[i].permitted = tswap32(data[i].permitted);
10956                     target_data[i].inheritable = tswap32(data[i].inheritable);
10957                 }
10958                 unlock_user(target_data, arg2, target_datalen);
10959             } else {
10960                 unlock_user(target_data, arg2, 0);
10961             }
10962         }
10963         break;
10964     }
10965     case TARGET_NR_sigaltstack:
10966         ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10967         break;
10968 
10969 #ifdef CONFIG_SENDFILE
10970     case TARGET_NR_sendfile:
10971     {
10972         off_t *offp = NULL;
10973         off_t off;
10974         if (arg3) {
10975             ret = get_user_sal(off, arg3);
10976             if (is_error(ret)) {
10977                 break;
10978             }
10979             offp = &off;
10980         }
10981         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10982         if (!is_error(ret) && arg3) {
10983             abi_long ret2 = put_user_sal(off, arg3);
10984             if (is_error(ret2)) {
10985                 ret = ret2;
10986             }
10987         }
10988         break;
10989     }
10990 #ifdef TARGET_NR_sendfile64
10991     case TARGET_NR_sendfile64:
10992     {
10993         off_t *offp = NULL;
10994         off_t off;
10995         if (arg3) {
10996             ret = get_user_s64(off, arg3);
10997             if (is_error(ret)) {
10998                 break;
10999             }
11000             offp = &off;
11001         }
11002         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11003         if (!is_error(ret) && arg3) {
11004             abi_long ret2 = put_user_s64(off, arg3);
11005             if (is_error(ret2)) {
11006                 ret = ret2;
11007             }
11008         }
11009         break;
11010     }
11011 #endif
11012 #else
11013     case TARGET_NR_sendfile:
11014 #ifdef TARGET_NR_sendfile64
11015     case TARGET_NR_sendfile64:
11016 #endif
11017         goto unimplemented;
11018 #endif
11019 
11020 #ifdef TARGET_NR_getpmsg
11021     case TARGET_NR_getpmsg:
11022         goto unimplemented;
11023 #endif
11024 #ifdef TARGET_NR_putpmsg
11025     case TARGET_NR_putpmsg:
11026         goto unimplemented;
11027 #endif
11028 #ifdef TARGET_NR_vfork
11029     case TARGET_NR_vfork:
11030         ret = get_errno(do_fork(cpu_env,
11031                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11032                         0, 0, 0, 0));
11033         break;
11034 #endif
11035 #ifdef TARGET_NR_ugetrlimit
11036     case TARGET_NR_ugetrlimit:
11037     {
11038 	struct rlimit rlim;
11039 	int resource = target_to_host_resource(arg1);
11040 	ret = get_errno(getrlimit(resource, &rlim));
11041 	if (!is_error(ret)) {
11042 	    struct target_rlimit *target_rlim;
11043             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11044                 goto efault;
11045 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11046 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11047             unlock_user_struct(target_rlim, arg2, 1);
11048 	}
11049 	break;
11050     }
11051 #endif
11052 #ifdef TARGET_NR_truncate64
11053     case TARGET_NR_truncate64:
11054         if (!(p = lock_user_string(arg1)))
11055             goto efault;
11056 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11057         unlock_user(p, arg1, 0);
11058 	break;
11059 #endif
11060 #ifdef TARGET_NR_ftruncate64
11061     case TARGET_NR_ftruncate64:
11062 	ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11063 	break;
11064 #endif
11065 #ifdef TARGET_NR_stat64
11066     case TARGET_NR_stat64:
11067         if (!(p = lock_user_string(arg1)))
11068             goto efault;
11069         ret = get_errno(stat(path(p), &st));
11070         unlock_user(p, arg1, 0);
11071         if (!is_error(ret))
11072             ret = host_to_target_stat64(cpu_env, arg2, &st);
11073         break;
11074 #endif
11075 #ifdef TARGET_NR_lstat64
11076     case TARGET_NR_lstat64:
11077         if (!(p = lock_user_string(arg1)))
11078             goto efault;
11079         ret = get_errno(lstat(path(p), &st));
11080         unlock_user(p, arg1, 0);
11081         if (!is_error(ret))
11082             ret = host_to_target_stat64(cpu_env, arg2, &st);
11083         break;
11084 #endif
11085 #ifdef TARGET_NR_fstat64
11086     case TARGET_NR_fstat64:
11087         ret = get_errno(fstat(arg1, &st));
11088         if (!is_error(ret))
11089             ret = host_to_target_stat64(cpu_env, arg2, &st);
11090         break;
11091 #endif
11092 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11093 #ifdef TARGET_NR_fstatat64
11094     case TARGET_NR_fstatat64:
11095 #endif
11096 #ifdef TARGET_NR_newfstatat
11097     case TARGET_NR_newfstatat:
11098 #endif
11099         if (!(p = lock_user_string(arg2)))
11100             goto efault;
11101         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11102         if (!is_error(ret))
11103             ret = host_to_target_stat64(cpu_env, arg3, &st);
11104         break;
11105 #endif
11106 #ifdef TARGET_NR_lchown
11107     case TARGET_NR_lchown:
11108         if (!(p = lock_user_string(arg1)))
11109             goto efault;
11110         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11111         unlock_user(p, arg1, 0);
11112         break;
11113 #endif
11114 #ifdef TARGET_NR_getuid
11115     case TARGET_NR_getuid:
11116         ret = get_errno(high2lowuid(getuid()));
11117         break;
11118 #endif
11119 #ifdef TARGET_NR_getgid
11120     case TARGET_NR_getgid:
11121         ret = get_errno(high2lowgid(getgid()));
11122         break;
11123 #endif
11124 #ifdef TARGET_NR_geteuid
11125     case TARGET_NR_geteuid:
11126         ret = get_errno(high2lowuid(geteuid()));
11127         break;
11128 #endif
11129 #ifdef TARGET_NR_getegid
11130     case TARGET_NR_getegid:
11131         ret = get_errno(high2lowgid(getegid()));
11132         break;
11133 #endif
11134     case TARGET_NR_setreuid:
11135         ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11136         break;
11137     case TARGET_NR_setregid:
11138         ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11139         break;
11140     case TARGET_NR_getgroups:
11141         {
11142             int gidsetsize = arg1;
11143             target_id *target_grouplist;
11144             gid_t *grouplist;
11145             int i;
11146 
11147             grouplist = alloca(gidsetsize * sizeof(gid_t));
11148             ret = get_errno(getgroups(gidsetsize, grouplist));
11149             if (gidsetsize == 0)
11150                 break;
11151             if (!is_error(ret)) {
11152                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11153                 if (!target_grouplist)
11154                     goto efault;
11155                 for(i = 0;i < ret; i++)
11156                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11157                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11158             }
11159         }
11160         break;
11161     case TARGET_NR_setgroups:
11162         {
11163             int gidsetsize = arg1;
11164             target_id *target_grouplist;
11165             gid_t *grouplist = NULL;
11166             int i;
11167             if (gidsetsize) {
11168                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11169                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11170                 if (!target_grouplist) {
11171                     ret = -TARGET_EFAULT;
11172                     goto fail;
11173                 }
11174                 for (i = 0; i < gidsetsize; i++) {
11175                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11176                 }
11177                 unlock_user(target_grouplist, arg2, 0);
11178             }
11179             ret = get_errno(setgroups(gidsetsize, grouplist));
11180         }
11181         break;
11182     case TARGET_NR_fchown:
11183         ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11184         break;
11185 #if defined(TARGET_NR_fchownat)
11186     case TARGET_NR_fchownat:
11187         if (!(p = lock_user_string(arg2)))
11188             goto efault;
11189         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11190                                  low2highgid(arg4), arg5));
11191         unlock_user(p, arg2, 0);
11192         break;
11193 #endif
11194 #ifdef TARGET_NR_setresuid
11195     case TARGET_NR_setresuid:
11196         ret = get_errno(sys_setresuid(low2highuid(arg1),
11197                                       low2highuid(arg2),
11198                                       low2highuid(arg3)));
11199         break;
11200 #endif
11201 #ifdef TARGET_NR_getresuid
11202     case TARGET_NR_getresuid:
11203         {
11204             uid_t ruid, euid, suid;
11205             ret = get_errno(getresuid(&ruid, &euid, &suid));
11206             if (!is_error(ret)) {
11207                 if (put_user_id(high2lowuid(ruid), arg1)
11208                     || put_user_id(high2lowuid(euid), arg2)
11209                     || put_user_id(high2lowuid(suid), arg3))
11210                     goto efault;
11211             }
11212         }
11213         break;
11214 #endif
11215 #ifdef TARGET_NR_getresgid
11216     case TARGET_NR_setresgid:
11217         ret = get_errno(sys_setresgid(low2highgid(arg1),
11218                                       low2highgid(arg2),
11219                                       low2highgid(arg3)));
11220         break;
11221 #endif
11222 #ifdef TARGET_NR_getresgid
11223     case TARGET_NR_getresgid:
11224         {
11225             gid_t rgid, egid, sgid;
11226             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11227             if (!is_error(ret)) {
11228                 if (put_user_id(high2lowgid(rgid), arg1)
11229                     || put_user_id(high2lowgid(egid), arg2)
11230                     || put_user_id(high2lowgid(sgid), arg3))
11231                     goto efault;
11232             }
11233         }
11234         break;
11235 #endif
11236 #ifdef TARGET_NR_chown
11237     case TARGET_NR_chown:
11238         if (!(p = lock_user_string(arg1)))
11239             goto efault;
11240         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11241         unlock_user(p, arg1, 0);
11242         break;
11243 #endif
11244     case TARGET_NR_setuid:
11245         ret = get_errno(sys_setuid(low2highuid(arg1)));
11246         break;
11247     case TARGET_NR_setgid:
11248         ret = get_errno(sys_setgid(low2highgid(arg1)));
11249         break;
11250     case TARGET_NR_setfsuid:
11251         ret = get_errno(setfsuid(arg1));
11252         break;
11253     case TARGET_NR_setfsgid:
11254         ret = get_errno(setfsgid(arg1));
11255         break;
11256 
11257 #ifdef TARGET_NR_lchown32
11258     case TARGET_NR_lchown32:
11259         if (!(p = lock_user_string(arg1)))
11260             goto efault;
11261         ret = get_errno(lchown(p, arg2, arg3));
11262         unlock_user(p, arg1, 0);
11263         break;
11264 #endif
11265 #ifdef TARGET_NR_getuid32
11266     case TARGET_NR_getuid32:
11267         ret = get_errno(getuid());
11268         break;
11269 #endif
11270 
11271 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11272    /* Alpha specific */
11273     case TARGET_NR_getxuid:
11274          {
11275             uid_t euid;
11276             euid=geteuid();
11277             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11278          }
11279         ret = get_errno(getuid());
11280         break;
11281 #endif
11282 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11283    /* Alpha specific */
11284     case TARGET_NR_getxgid:
11285          {
11286             uid_t egid;
11287             egid=getegid();
11288             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11289          }
11290         ret = get_errno(getgid());
11291         break;
11292 #endif
11293 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11294     /* Alpha specific */
11295     case TARGET_NR_osf_getsysinfo:
11296         ret = -TARGET_EOPNOTSUPP;
11297         switch (arg1) {
11298           case TARGET_GSI_IEEE_FP_CONTROL:
11299             {
11300                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11301 
11302                 /* Copied from linux ieee_fpcr_to_swcr.  */
11303                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11304                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11305                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11306                                         | SWCR_TRAP_ENABLE_DZE
11307                                         | SWCR_TRAP_ENABLE_OVF);
11308                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11309                                         | SWCR_TRAP_ENABLE_INE);
11310                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11311                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11312 
11313                 if (put_user_u64 (swcr, arg2))
11314                         goto efault;
11315                 ret = 0;
11316             }
11317             break;
11318 
11319           /* case GSI_IEEE_STATE_AT_SIGNAL:
11320              -- Not implemented in linux kernel.
11321              case GSI_UACPROC:
11322              -- Retrieves current unaligned access state; not much used.
11323              case GSI_PROC_TYPE:
11324              -- Retrieves implver information; surely not used.
11325              case GSI_GET_HWRPB:
11326              -- Grabs a copy of the HWRPB; surely not used.
11327           */
11328         }
11329         break;
11330 #endif
11331 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11332     /* Alpha specific */
11333     case TARGET_NR_osf_setsysinfo:
11334         ret = -TARGET_EOPNOTSUPP;
11335         switch (arg1) {
11336           case TARGET_SSI_IEEE_FP_CONTROL:
11337             {
11338                 uint64_t swcr, fpcr, orig_fpcr;
11339 
11340                 if (get_user_u64 (swcr, arg2)) {
11341                     goto efault;
11342                 }
11343                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11344                 fpcr = orig_fpcr & FPCR_DYN_MASK;
11345 
11346                 /* Copied from linux ieee_swcr_to_fpcr.  */
11347                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11348                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11349                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11350                                   | SWCR_TRAP_ENABLE_DZE
11351                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
11352                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11353                                   | SWCR_TRAP_ENABLE_INE)) << 57;
11354                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11355                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11356 
11357                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11358                 ret = 0;
11359             }
11360             break;
11361 
11362           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11363             {
11364                 uint64_t exc, fpcr, orig_fpcr;
11365                 int si_code;
11366 
11367                 if (get_user_u64(exc, arg2)) {
11368                     goto efault;
11369                 }
11370 
11371                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11372 
11373                 /* We only add to the exception status here.  */
11374                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11375 
11376                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11377                 ret = 0;
11378 
11379                 /* Old exceptions are not signaled.  */
11380                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11381 
11382                 /* If any exceptions set by this call,
11383                    and are unmasked, send a signal.  */
11384                 si_code = 0;
11385                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11386                     si_code = TARGET_FPE_FLTRES;
11387                 }
11388                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11389                     si_code = TARGET_FPE_FLTUND;
11390                 }
11391                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11392                     si_code = TARGET_FPE_FLTOVF;
11393                 }
11394                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11395                     si_code = TARGET_FPE_FLTDIV;
11396                 }
11397                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11398                     si_code = TARGET_FPE_FLTINV;
11399                 }
11400                 if (si_code != 0) {
11401                     target_siginfo_t info;
11402                     info.si_signo = SIGFPE;
11403                     info.si_errno = 0;
11404                     info.si_code = si_code;
11405                     info._sifields._sigfault._addr
11406                         = ((CPUArchState *)cpu_env)->pc;
11407                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11408                                  QEMU_SI_FAULT, &info);
11409                 }
11410             }
11411             break;
11412 
11413           /* case SSI_NVPAIRS:
11414              -- Used with SSIN_UACPROC to enable unaligned accesses.
11415              case SSI_IEEE_STATE_AT_SIGNAL:
11416              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11417              -- Not implemented in linux kernel
11418           */
11419         }
11420         break;
11421 #endif
11422 #ifdef TARGET_NR_osf_sigprocmask
11423     /* Alpha specific.  */
11424     case TARGET_NR_osf_sigprocmask:
11425         {
11426             abi_ulong mask;
11427             int how;
11428             sigset_t set, oldset;
11429 
11430             switch(arg1) {
11431             case TARGET_SIG_BLOCK:
11432                 how = SIG_BLOCK;
11433                 break;
11434             case TARGET_SIG_UNBLOCK:
11435                 how = SIG_UNBLOCK;
11436                 break;
11437             case TARGET_SIG_SETMASK:
11438                 how = SIG_SETMASK;
11439                 break;
11440             default:
11441                 ret = -TARGET_EINVAL;
11442                 goto fail;
11443             }
11444             mask = arg2;
11445             target_to_host_old_sigset(&set, &mask);
11446             ret = do_sigprocmask(how, &set, &oldset);
11447             if (!ret) {
11448                 host_to_target_old_sigset(&mask, &oldset);
11449                 ret = mask;
11450             }
11451         }
11452         break;
11453 #endif
11454 
11455 #ifdef TARGET_NR_getgid32
11456     case TARGET_NR_getgid32:
11457         ret = get_errno(getgid());
11458         break;
11459 #endif
11460 #ifdef TARGET_NR_geteuid32
11461     case TARGET_NR_geteuid32:
11462         ret = get_errno(geteuid());
11463         break;
11464 #endif
11465 #ifdef TARGET_NR_getegid32
11466     case TARGET_NR_getegid32:
11467         ret = get_errno(getegid());
11468         break;
11469 #endif
11470 #ifdef TARGET_NR_setreuid32
11471     case TARGET_NR_setreuid32:
11472         ret = get_errno(setreuid(arg1, arg2));
11473         break;
11474 #endif
11475 #ifdef TARGET_NR_setregid32
11476     case TARGET_NR_setregid32:
11477         ret = get_errno(setregid(arg1, arg2));
11478         break;
11479 #endif
11480 #ifdef TARGET_NR_getgroups32
11481     case TARGET_NR_getgroups32:
11482         {
11483             int gidsetsize = arg1;
11484             uint32_t *target_grouplist;
11485             gid_t *grouplist;
11486             int i;
11487 
11488             grouplist = alloca(gidsetsize * sizeof(gid_t));
11489             ret = get_errno(getgroups(gidsetsize, grouplist));
11490             if (gidsetsize == 0)
11491                 break;
11492             if (!is_error(ret)) {
11493                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11494                 if (!target_grouplist) {
11495                     ret = -TARGET_EFAULT;
11496                     goto fail;
11497                 }
11498                 for(i = 0;i < ret; i++)
11499                     target_grouplist[i] = tswap32(grouplist[i]);
11500                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11501             }
11502         }
11503         break;
11504 #endif
11505 #ifdef TARGET_NR_setgroups32
11506     case TARGET_NR_setgroups32:
11507         {
11508             int gidsetsize = arg1;
11509             uint32_t *target_grouplist;
11510             gid_t *grouplist;
11511             int i;
11512 
11513             grouplist = alloca(gidsetsize * sizeof(gid_t));
11514             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11515             if (!target_grouplist) {
11516                 ret = -TARGET_EFAULT;
11517                 goto fail;
11518             }
11519             for(i = 0;i < gidsetsize; i++)
11520                 grouplist[i] = tswap32(target_grouplist[i]);
11521             unlock_user(target_grouplist, arg2, 0);
11522             ret = get_errno(setgroups(gidsetsize, grouplist));
11523         }
11524         break;
11525 #endif
11526 #ifdef TARGET_NR_fchown32
11527     case TARGET_NR_fchown32:
11528         ret = get_errno(fchown(arg1, arg2, arg3));
11529         break;
11530 #endif
11531 #ifdef TARGET_NR_setresuid32
11532     case TARGET_NR_setresuid32:
11533         ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11534         break;
11535 #endif
11536 #ifdef TARGET_NR_getresuid32
11537     case TARGET_NR_getresuid32:
11538         {
11539             uid_t ruid, euid, suid;
11540             ret = get_errno(getresuid(&ruid, &euid, &suid));
11541             if (!is_error(ret)) {
11542                 if (put_user_u32(ruid, arg1)
11543                     || put_user_u32(euid, arg2)
11544                     || put_user_u32(suid, arg3))
11545                     goto efault;
11546             }
11547         }
11548         break;
11549 #endif
11550 #ifdef TARGET_NR_setresgid32
11551     case TARGET_NR_setresgid32:
11552         ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11553         break;
11554 #endif
11555 #ifdef TARGET_NR_getresgid32
11556     case TARGET_NR_getresgid32:
11557         {
11558             gid_t rgid, egid, sgid;
11559             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11560             if (!is_error(ret)) {
11561                 if (put_user_u32(rgid, arg1)
11562                     || put_user_u32(egid, arg2)
11563                     || put_user_u32(sgid, arg3))
11564                     goto efault;
11565             }
11566         }
11567         break;
11568 #endif
11569 #ifdef TARGET_NR_chown32
11570     case TARGET_NR_chown32:
11571         if (!(p = lock_user_string(arg1)))
11572             goto efault;
11573         ret = get_errno(chown(p, arg2, arg3));
11574         unlock_user(p, arg1, 0);
11575         break;
11576 #endif
11577 #ifdef TARGET_NR_setuid32
11578     case TARGET_NR_setuid32:
11579         ret = get_errno(sys_setuid(arg1));
11580         break;
11581 #endif
11582 #ifdef TARGET_NR_setgid32
11583     case TARGET_NR_setgid32:
11584         ret = get_errno(sys_setgid(arg1));
11585         break;
11586 #endif
11587 #ifdef TARGET_NR_setfsuid32
11588     case TARGET_NR_setfsuid32:
11589         ret = get_errno(setfsuid(arg1));
11590         break;
11591 #endif
11592 #ifdef TARGET_NR_setfsgid32
11593     case TARGET_NR_setfsgid32:
11594         ret = get_errno(setfsgid(arg1));
11595         break;
11596 #endif
11597 
11598     case TARGET_NR_pivot_root:
11599         goto unimplemented;
11600 #ifdef TARGET_NR_mincore
11601     case TARGET_NR_mincore:
11602         {
11603             void *a;
11604             ret = -TARGET_ENOMEM;
11605             a = lock_user(VERIFY_READ, arg1, arg2, 0);
11606             if (!a) {
11607                 goto fail;
11608             }
11609             ret = -TARGET_EFAULT;
11610             p = lock_user_string(arg3);
11611             if (!p) {
11612                 goto mincore_fail;
11613             }
11614             ret = get_errno(mincore(a, arg2, p));
11615             unlock_user(p, arg3, ret);
11616             mincore_fail:
11617             unlock_user(a, arg1, 0);
11618         }
11619         break;
11620 #endif
11621 #ifdef TARGET_NR_arm_fadvise64_64
11622     case TARGET_NR_arm_fadvise64_64:
11623         /* arm_fadvise64_64 looks like fadvise64_64 but
11624          * with different argument order: fd, advice, offset, len
11625          * rather than the usual fd, offset, len, advice.
11626          * Note that offset and len are both 64-bit so appear as
11627          * pairs of 32-bit registers.
11628          */
11629         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11630                             target_offset64(arg5, arg6), arg2);
11631         ret = -host_to_target_errno(ret);
11632         break;
11633 #endif
11634 
11635 #if TARGET_ABI_BITS == 32
11636 
11637 #ifdef TARGET_NR_fadvise64_64
11638     case TARGET_NR_fadvise64_64:
11639 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11640         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11641         ret = arg2;
11642         arg2 = arg3;
11643         arg3 = arg4;
11644         arg4 = arg5;
11645         arg5 = arg6;
11646         arg6 = ret;
11647 #else
11648         /* 6 args: fd, offset (high, low), len (high, low), advice */
11649         if (regpairs_aligned(cpu_env, num)) {
11650             /* offset is in (3,4), len in (5,6) and advice in 7 */
11651             arg2 = arg3;
11652             arg3 = arg4;
11653             arg4 = arg5;
11654             arg5 = arg6;
11655             arg6 = arg7;
11656         }
11657 #endif
11658         ret = -host_to_target_errno(posix_fadvise(arg1,
11659                                                   target_offset64(arg2, arg3),
11660                                                   target_offset64(arg4, arg5),
11661                                                   arg6));
11662         break;
11663 #endif
11664 
11665 #ifdef TARGET_NR_fadvise64
11666     case TARGET_NR_fadvise64:
11667         /* 5 args: fd, offset (high, low), len, advice */
11668         if (regpairs_aligned(cpu_env, num)) {
11669             /* offset is in (3,4), len in 5 and advice in 6 */
11670             arg2 = arg3;
11671             arg3 = arg4;
11672             arg4 = arg5;
11673             arg5 = arg6;
11674         }
11675         ret = -host_to_target_errno(posix_fadvise(arg1,
11676                                                   target_offset64(arg2, arg3),
11677                                                   arg4, arg5));
11678         break;
11679 #endif
11680 
11681 #else /* not a 32-bit ABI */
11682 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11683 #ifdef TARGET_NR_fadvise64_64
11684     case TARGET_NR_fadvise64_64:
11685 #endif
11686 #ifdef TARGET_NR_fadvise64
11687     case TARGET_NR_fadvise64:
11688 #endif
11689 #ifdef TARGET_S390X
11690         switch (arg4) {
11691         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11692         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11693         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11694         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11695         default: break;
11696         }
11697 #endif
11698         ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11699         break;
11700 #endif
11701 #endif /* end of 64-bit ABI fadvise handling */
11702 
11703 #ifdef TARGET_NR_madvise
11704     case TARGET_NR_madvise:
11705         /* A straight passthrough may not be safe because qemu sometimes
11706            turns private file-backed mappings into anonymous mappings.
11707            This will break MADV_DONTNEED.
11708            This is a hint, so ignoring and returning success is ok.  */
11709         ret = get_errno(0);
11710         break;
11711 #endif
11712 #if TARGET_ABI_BITS == 32
11713     case TARGET_NR_fcntl64:
11714     {
11715 	int cmd;
11716 	struct flock64 fl;
11717         from_flock64_fn *copyfrom = copy_from_user_flock64;
11718         to_flock64_fn *copyto = copy_to_user_flock64;
11719 
11720 #ifdef TARGET_ARM
11721         if (!((CPUARMState *)cpu_env)->eabi) {
11722             copyfrom = copy_from_user_oabi_flock64;
11723             copyto = copy_to_user_oabi_flock64;
11724         }
11725 #endif
11726 
11727 	cmd = target_to_host_fcntl_cmd(arg2);
11728         if (cmd == -TARGET_EINVAL) {
11729             ret = cmd;
11730             break;
11731         }
11732 
11733         switch(arg2) {
11734         case TARGET_F_GETLK64:
11735             ret = copyfrom(&fl, arg3);
11736             if (ret) {
11737                 break;
11738             }
11739             ret = get_errno(fcntl(arg1, cmd, &fl));
11740             if (ret == 0) {
11741                 ret = copyto(arg3, &fl);
11742             }
11743 	    break;
11744 
11745         case TARGET_F_SETLK64:
11746         case TARGET_F_SETLKW64:
11747             ret = copyfrom(&fl, arg3);
11748             if (ret) {
11749                 break;
11750             }
11751             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11752 	    break;
11753         default:
11754             ret = do_fcntl(arg1, arg2, arg3);
11755             break;
11756         }
11757 	break;
11758     }
11759 #endif
11760 #ifdef TARGET_NR_cacheflush
11761     case TARGET_NR_cacheflush:
11762         /* self-modifying code is handled automatically, so nothing needed */
11763         ret = 0;
11764         break;
11765 #endif
11766 #ifdef TARGET_NR_security
11767     case TARGET_NR_security:
11768         goto unimplemented;
11769 #endif
11770 #ifdef TARGET_NR_getpagesize
11771     case TARGET_NR_getpagesize:
11772         ret = TARGET_PAGE_SIZE;
11773         break;
11774 #endif
11775     case TARGET_NR_gettid:
11776         ret = get_errno(gettid());
11777         break;
11778 #ifdef TARGET_NR_readahead
11779     case TARGET_NR_readahead:
11780 #if TARGET_ABI_BITS == 32
11781         if (regpairs_aligned(cpu_env, num)) {
11782             arg2 = arg3;
11783             arg3 = arg4;
11784             arg4 = arg5;
11785         }
11786         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11787 #else
11788         ret = get_errno(readahead(arg1, arg2, arg3));
11789 #endif
11790         break;
11791 #endif
11792 #ifdef CONFIG_ATTR
11793 #ifdef TARGET_NR_setxattr
11794     case TARGET_NR_listxattr:
11795     case TARGET_NR_llistxattr:
11796     {
11797         void *p, *b = 0;
11798         if (arg2) {
11799             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11800             if (!b) {
11801                 ret = -TARGET_EFAULT;
11802                 break;
11803             }
11804         }
11805         p = lock_user_string(arg1);
11806         if (p) {
11807             if (num == TARGET_NR_listxattr) {
11808                 ret = get_errno(listxattr(p, b, arg3));
11809             } else {
11810                 ret = get_errno(llistxattr(p, b, arg3));
11811             }
11812         } else {
11813             ret = -TARGET_EFAULT;
11814         }
11815         unlock_user(p, arg1, 0);
11816         unlock_user(b, arg2, arg3);
11817         break;
11818     }
11819     case TARGET_NR_flistxattr:
11820     {
11821         void *b = 0;
11822         if (arg2) {
11823             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11824             if (!b) {
11825                 ret = -TARGET_EFAULT;
11826                 break;
11827             }
11828         }
11829         ret = get_errno(flistxattr(arg1, b, arg3));
11830         unlock_user(b, arg2, arg3);
11831         break;
11832     }
11833     case TARGET_NR_setxattr:
11834     case TARGET_NR_lsetxattr:
11835         {
11836             void *p, *n, *v = 0;
11837             if (arg3) {
11838                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11839                 if (!v) {
11840                     ret = -TARGET_EFAULT;
11841                     break;
11842                 }
11843             }
11844             p = lock_user_string(arg1);
11845             n = lock_user_string(arg2);
11846             if (p && n) {
11847                 if (num == TARGET_NR_setxattr) {
11848                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11849                 } else {
11850                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11851                 }
11852             } else {
11853                 ret = -TARGET_EFAULT;
11854             }
11855             unlock_user(p, arg1, 0);
11856             unlock_user(n, arg2, 0);
11857             unlock_user(v, arg3, 0);
11858         }
11859         break;
11860     case TARGET_NR_fsetxattr:
11861         {
11862             void *n, *v = 0;
11863             if (arg3) {
11864                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11865                 if (!v) {
11866                     ret = -TARGET_EFAULT;
11867                     break;
11868                 }
11869             }
11870             n = lock_user_string(arg2);
11871             if (n) {
11872                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11873             } else {
11874                 ret = -TARGET_EFAULT;
11875             }
11876             unlock_user(n, arg2, 0);
11877             unlock_user(v, arg3, 0);
11878         }
11879         break;
11880     case TARGET_NR_getxattr:
11881     case TARGET_NR_lgetxattr:
11882         {
11883             void *p, *n, *v = 0;
11884             if (arg3) {
11885                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11886                 if (!v) {
11887                     ret = -TARGET_EFAULT;
11888                     break;
11889                 }
11890             }
11891             p = lock_user_string(arg1);
11892             n = lock_user_string(arg2);
11893             if (p && n) {
11894                 if (num == TARGET_NR_getxattr) {
11895                     ret = get_errno(getxattr(p, n, v, arg4));
11896                 } else {
11897                     ret = get_errno(lgetxattr(p, n, v, arg4));
11898                 }
11899             } else {
11900                 ret = -TARGET_EFAULT;
11901             }
11902             unlock_user(p, arg1, 0);
11903             unlock_user(n, arg2, 0);
11904             unlock_user(v, arg3, arg4);
11905         }
11906         break;
11907     case TARGET_NR_fgetxattr:
11908         {
11909             void *n, *v = 0;
11910             if (arg3) {
11911                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11912                 if (!v) {
11913                     ret = -TARGET_EFAULT;
11914                     break;
11915                 }
11916             }
11917             n = lock_user_string(arg2);
11918             if (n) {
11919                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11920             } else {
11921                 ret = -TARGET_EFAULT;
11922             }
11923             unlock_user(n, arg2, 0);
11924             unlock_user(v, arg3, arg4);
11925         }
11926         break;
11927     case TARGET_NR_removexattr:
11928     case TARGET_NR_lremovexattr:
11929         {
11930             void *p, *n;
11931             p = lock_user_string(arg1);
11932             n = lock_user_string(arg2);
11933             if (p && n) {
11934                 if (num == TARGET_NR_removexattr) {
11935                     ret = get_errno(removexattr(p, n));
11936                 } else {
11937                     ret = get_errno(lremovexattr(p, n));
11938                 }
11939             } else {
11940                 ret = -TARGET_EFAULT;
11941             }
11942             unlock_user(p, arg1, 0);
11943             unlock_user(n, arg2, 0);
11944         }
11945         break;
11946     case TARGET_NR_fremovexattr:
11947         {
11948             void *n;
11949             n = lock_user_string(arg2);
11950             if (n) {
11951                 ret = get_errno(fremovexattr(arg1, n));
11952             } else {
11953                 ret = -TARGET_EFAULT;
11954             }
11955             unlock_user(n, arg2, 0);
11956         }
11957         break;
11958 #endif
11959 #endif /* CONFIG_ATTR */
11960 #ifdef TARGET_NR_set_thread_area
11961     case TARGET_NR_set_thread_area:
11962 #if defined(TARGET_MIPS)
11963       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11964       ret = 0;
11965       break;
11966 #elif defined(TARGET_CRIS)
11967       if (arg1 & 0xff)
11968           ret = -TARGET_EINVAL;
11969       else {
11970           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11971           ret = 0;
11972       }
11973       break;
11974 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11975       ret = do_set_thread_area(cpu_env, arg1);
11976       break;
11977 #elif defined(TARGET_M68K)
11978       {
11979           TaskState *ts = cpu->opaque;
11980           ts->tp_value = arg1;
11981           ret = 0;
11982           break;
11983       }
11984 #else
11985       goto unimplemented_nowarn;
11986 #endif
11987 #endif
11988 #ifdef TARGET_NR_get_thread_area
11989     case TARGET_NR_get_thread_area:
11990 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11991         ret = do_get_thread_area(cpu_env, arg1);
11992         break;
11993 #elif defined(TARGET_M68K)
11994         {
11995             TaskState *ts = cpu->opaque;
11996             ret = ts->tp_value;
11997             break;
11998         }
11999 #else
12000         goto unimplemented_nowarn;
12001 #endif
12002 #endif
12003 #ifdef TARGET_NR_getdomainname
12004     case TARGET_NR_getdomainname:
12005         goto unimplemented_nowarn;
12006 #endif
12007 
12008 #ifdef TARGET_NR_clock_settime
12009     case TARGET_NR_clock_settime:
12010     {
12011         struct timespec ts;
12012 
12013         ret = target_to_host_timespec(&ts, arg2);
12014         if (!is_error(ret)) {
12015             ret = get_errno(clock_settime(arg1, &ts));
12016         }
12017         break;
12018     }
12019 #endif
12020 #ifdef TARGET_NR_clock_gettime
12021     case TARGET_NR_clock_gettime:
12022     {
12023         struct timespec ts;
12024         ret = get_errno(clock_gettime(arg1, &ts));
12025         if (!is_error(ret)) {
12026             ret = host_to_target_timespec(arg2, &ts);
12027         }
12028         break;
12029     }
12030 #endif
12031 #ifdef TARGET_NR_clock_getres
12032     case TARGET_NR_clock_getres:
12033     {
12034         struct timespec ts;
12035         ret = get_errno(clock_getres(arg1, &ts));
12036         if (!is_error(ret)) {
12037             host_to_target_timespec(arg2, &ts);
12038         }
12039         break;
12040     }
12041 #endif
12042 #ifdef TARGET_NR_clock_nanosleep
12043     case TARGET_NR_clock_nanosleep:
12044     {
12045         struct timespec ts;
12046         target_to_host_timespec(&ts, arg3);
12047         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12048                                              &ts, arg4 ? &ts : NULL));
12049         if (arg4)
12050             host_to_target_timespec(arg4, &ts);
12051 
12052 #if defined(TARGET_PPC)
12053         /* clock_nanosleep is odd in that it returns positive errno values.
12054          * On PPC, CR0 bit 3 should be set in such a situation. */
12055         if (ret && ret != -TARGET_ERESTARTSYS) {
12056             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
12057         }
12058 #endif
12059         break;
12060     }
12061 #endif
12062 
12063 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12064     case TARGET_NR_set_tid_address:
12065         ret = get_errno(set_tid_address((int *)g2h(arg1)));
12066         break;
12067 #endif
12068 
12069     case TARGET_NR_tkill:
12070         ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12071         break;
12072 
12073     case TARGET_NR_tgkill:
12074         ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
12075                         target_to_host_signal(arg3)));
12076         break;
12077 
12078 #ifdef TARGET_NR_set_robust_list
12079     case TARGET_NR_set_robust_list:
12080     case TARGET_NR_get_robust_list:
12081         /* The ABI for supporting robust futexes has userspace pass
12082          * the kernel a pointer to a linked list which is updated by
12083          * userspace after the syscall; the list is walked by the kernel
12084          * when the thread exits. Since the linked list in QEMU guest
12085          * memory isn't a valid linked list for the host and we have
12086          * no way to reliably intercept the thread-death event, we can't
12087          * support these. Silently return ENOSYS so that guest userspace
12088          * falls back to a non-robust futex implementation (which should
12089          * be OK except in the corner case of the guest crashing while
12090          * holding a mutex that is shared with another process via
12091          * shared memory).
12092          */
12093         goto unimplemented_nowarn;
12094 #endif
12095 
12096 #if defined(TARGET_NR_utimensat)
12097     case TARGET_NR_utimensat:
12098         {
12099             struct timespec *tsp, ts[2];
12100             if (!arg3) {
12101                 tsp = NULL;
12102             } else {
12103                 target_to_host_timespec(ts, arg3);
12104                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12105                 tsp = ts;
12106             }
12107             if (!arg2)
12108                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12109             else {
12110                 if (!(p = lock_user_string(arg2))) {
12111                     ret = -TARGET_EFAULT;
12112                     goto fail;
12113                 }
12114                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12115                 unlock_user(p, arg2, 0);
12116             }
12117         }
12118 	break;
12119 #endif
12120     case TARGET_NR_futex:
12121         ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12122         break;
12123 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12124     case TARGET_NR_inotify_init:
12125         ret = get_errno(sys_inotify_init());
12126         if (ret >= 0) {
12127             fd_trans_register(ret, &target_inotify_trans);
12128         }
12129         break;
12130 #endif
12131 #ifdef CONFIG_INOTIFY1
12132 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12133     case TARGET_NR_inotify_init1:
12134         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12135                                           fcntl_flags_tbl)));
12136         if (ret >= 0) {
12137             fd_trans_register(ret, &target_inotify_trans);
12138         }
12139         break;
12140 #endif
12141 #endif
12142 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12143     case TARGET_NR_inotify_add_watch:
12144         p = lock_user_string(arg2);
12145         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12146         unlock_user(p, arg2, 0);
12147         break;
12148 #endif
12149 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12150     case TARGET_NR_inotify_rm_watch:
12151         ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12152         break;
12153 #endif
12154 
12155 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12156     case TARGET_NR_mq_open:
12157         {
12158             struct mq_attr posix_mq_attr;
12159             struct mq_attr *pposix_mq_attr;
12160             int host_flags;
12161 
12162             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12163             pposix_mq_attr = NULL;
12164             if (arg4) {
12165                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12166                     goto efault;
12167                 }
12168                 pposix_mq_attr = &posix_mq_attr;
12169             }
12170             p = lock_user_string(arg1 - 1);
12171             if (!p) {
12172                 goto efault;
12173             }
12174             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12175             unlock_user (p, arg1, 0);
12176         }
12177         break;
12178 
12179     case TARGET_NR_mq_unlink:
12180         p = lock_user_string(arg1 - 1);
12181         if (!p) {
12182             ret = -TARGET_EFAULT;
12183             break;
12184         }
12185         ret = get_errno(mq_unlink(p));
12186         unlock_user (p, arg1, 0);
12187         break;
12188 
12189     case TARGET_NR_mq_timedsend:
12190         {
12191             struct timespec ts;
12192 
12193             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12194             if (arg5 != 0) {
12195                 target_to_host_timespec(&ts, arg5);
12196                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12197                 host_to_target_timespec(arg5, &ts);
12198             } else {
12199                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12200             }
12201             unlock_user (p, arg2, arg3);
12202         }
12203         break;
12204 
12205     case TARGET_NR_mq_timedreceive:
12206         {
12207             struct timespec ts;
12208             unsigned int prio;
12209 
12210             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12211             if (arg5 != 0) {
12212                 target_to_host_timespec(&ts, arg5);
12213                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12214                                                      &prio, &ts));
12215                 host_to_target_timespec(arg5, &ts);
12216             } else {
12217                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12218                                                      &prio, NULL));
12219             }
12220             unlock_user (p, arg2, arg3);
12221             if (arg4 != 0)
12222                 put_user_u32(prio, arg4);
12223         }
12224         break;
12225 
12226     /* Not implemented for now... */
12227 /*     case TARGET_NR_mq_notify: */
12228 /*         break; */
12229 
12230     case TARGET_NR_mq_getsetattr:
12231         {
12232             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12233             ret = 0;
12234             if (arg2 != 0) {
12235                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12236                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12237                                            &posix_mq_attr_out));
12238             } else if (arg3 != 0) {
12239                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12240             }
12241             if (ret == 0 && arg3 != 0) {
12242                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12243             }
12244         }
12245         break;
12246 #endif
12247 
12248 #ifdef CONFIG_SPLICE
12249 #ifdef TARGET_NR_tee
12250     case TARGET_NR_tee:
12251         {
12252             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12253         }
12254         break;
12255 #endif
12256 #ifdef TARGET_NR_splice
12257     case TARGET_NR_splice:
12258         {
12259             loff_t loff_in, loff_out;
12260             loff_t *ploff_in = NULL, *ploff_out = NULL;
12261             if (arg2) {
12262                 if (get_user_u64(loff_in, arg2)) {
12263                     goto efault;
12264                 }
12265                 ploff_in = &loff_in;
12266             }
12267             if (arg4) {
12268                 if (get_user_u64(loff_out, arg4)) {
12269                     goto efault;
12270                 }
12271                 ploff_out = &loff_out;
12272             }
12273             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12274             if (arg2) {
12275                 if (put_user_u64(loff_in, arg2)) {
12276                     goto efault;
12277                 }
12278             }
12279             if (arg4) {
12280                 if (put_user_u64(loff_out, arg4)) {
12281                     goto efault;
12282                 }
12283             }
12284         }
12285         break;
12286 #endif
12287 #ifdef TARGET_NR_vmsplice
12288 	case TARGET_NR_vmsplice:
12289         {
12290             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12291             if (vec != NULL) {
12292                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12293                 unlock_iovec(vec, arg2, arg3, 0);
12294             } else {
12295                 ret = -host_to_target_errno(errno);
12296             }
12297         }
12298         break;
12299 #endif
12300 #endif /* CONFIG_SPLICE */
12301 #ifdef CONFIG_EVENTFD
12302 #if defined(TARGET_NR_eventfd)
12303     case TARGET_NR_eventfd:
12304         ret = get_errno(eventfd(arg1, 0));
12305         if (ret >= 0) {
12306             fd_trans_register(ret, &target_eventfd_trans);
12307         }
12308         break;
12309 #endif
12310 #if defined(TARGET_NR_eventfd2)
12311     case TARGET_NR_eventfd2:
12312     {
12313         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12314         if (arg2 & TARGET_O_NONBLOCK) {
12315             host_flags |= O_NONBLOCK;
12316         }
12317         if (arg2 & TARGET_O_CLOEXEC) {
12318             host_flags |= O_CLOEXEC;
12319         }
12320         ret = get_errno(eventfd(arg1, host_flags));
12321         if (ret >= 0) {
12322             fd_trans_register(ret, &target_eventfd_trans);
12323         }
12324         break;
12325     }
12326 #endif
12327 #endif /* CONFIG_EVENTFD  */
12328 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12329     case TARGET_NR_fallocate:
12330 #if TARGET_ABI_BITS == 32
12331         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12332                                   target_offset64(arg5, arg6)));
12333 #else
12334         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12335 #endif
12336         break;
12337 #endif
12338 #if defined(CONFIG_SYNC_FILE_RANGE)
12339 #if defined(TARGET_NR_sync_file_range)
12340     case TARGET_NR_sync_file_range:
12341 #if TARGET_ABI_BITS == 32
12342 #if defined(TARGET_MIPS)
12343         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12344                                         target_offset64(arg5, arg6), arg7));
12345 #else
12346         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12347                                         target_offset64(arg4, arg5), arg6));
12348 #endif /* !TARGET_MIPS */
12349 #else
12350         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12351 #endif
12352         break;
12353 #endif
12354 #if defined(TARGET_NR_sync_file_range2)
12355     case TARGET_NR_sync_file_range2:
12356         /* This is like sync_file_range but the arguments are reordered */
12357 #if TARGET_ABI_BITS == 32
12358         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12359                                         target_offset64(arg5, arg6), arg2));
12360 #else
12361         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12362 #endif
12363         break;
12364 #endif
12365 #endif
12366 #if defined(TARGET_NR_signalfd4)
12367     case TARGET_NR_signalfd4:
12368         ret = do_signalfd4(arg1, arg2, arg4);
12369         break;
12370 #endif
12371 #if defined(TARGET_NR_signalfd)
12372     case TARGET_NR_signalfd:
12373         ret = do_signalfd4(arg1, arg2, 0);
12374         break;
12375 #endif
12376 #if defined(CONFIG_EPOLL)
12377 #if defined(TARGET_NR_epoll_create)
12378     case TARGET_NR_epoll_create:
12379         ret = get_errno(epoll_create(arg1));
12380         break;
12381 #endif
12382 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12383     case TARGET_NR_epoll_create1:
12384         ret = get_errno(epoll_create1(arg1));
12385         break;
12386 #endif
12387 #if defined(TARGET_NR_epoll_ctl)
12388     case TARGET_NR_epoll_ctl:
12389     {
12390         struct epoll_event ep;
12391         struct epoll_event *epp = 0;
12392         if (arg4) {
12393             struct target_epoll_event *target_ep;
12394             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12395                 goto efault;
12396             }
12397             ep.events = tswap32(target_ep->events);
12398             /* The epoll_data_t union is just opaque data to the kernel,
12399              * so we transfer all 64 bits across and need not worry what
12400              * actual data type it is.
12401              */
12402             ep.data.u64 = tswap64(target_ep->data.u64);
12403             unlock_user_struct(target_ep, arg4, 0);
12404             epp = &ep;
12405         }
12406         ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12407         break;
12408     }
12409 #endif
12410 
12411 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12412 #if defined(TARGET_NR_epoll_wait)
12413     case TARGET_NR_epoll_wait:
12414 #endif
12415 #if defined(TARGET_NR_epoll_pwait)
12416     case TARGET_NR_epoll_pwait:
12417 #endif
12418     {
12419         struct target_epoll_event *target_ep;
12420         struct epoll_event *ep;
12421         int epfd = arg1;
12422         int maxevents = arg3;
12423         int timeout = arg4;
12424 
12425         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12426             ret = -TARGET_EINVAL;
12427             break;
12428         }
12429 
12430         target_ep = lock_user(VERIFY_WRITE, arg2,
12431                               maxevents * sizeof(struct target_epoll_event), 1);
12432         if (!target_ep) {
12433             goto efault;
12434         }
12435 
12436         ep = g_try_new(struct epoll_event, maxevents);
12437         if (!ep) {
12438             unlock_user(target_ep, arg2, 0);
12439             ret = -TARGET_ENOMEM;
12440             break;
12441         }
12442 
12443         switch (num) {
12444 #if defined(TARGET_NR_epoll_pwait)
12445         case TARGET_NR_epoll_pwait:
12446         {
12447             target_sigset_t *target_set;
12448             sigset_t _set, *set = &_set;
12449 
12450             if (arg5) {
12451                 if (arg6 != sizeof(target_sigset_t)) {
12452                     ret = -TARGET_EINVAL;
12453                     break;
12454                 }
12455 
12456                 target_set = lock_user(VERIFY_READ, arg5,
12457                                        sizeof(target_sigset_t), 1);
12458                 if (!target_set) {
12459                     ret = -TARGET_EFAULT;
12460                     break;
12461                 }
12462                 target_to_host_sigset(set, target_set);
12463                 unlock_user(target_set, arg5, 0);
12464             } else {
12465                 set = NULL;
12466             }
12467 
12468             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12469                                              set, SIGSET_T_SIZE));
12470             break;
12471         }
12472 #endif
12473 #if defined(TARGET_NR_epoll_wait)
12474         case TARGET_NR_epoll_wait:
12475             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12476                                              NULL, 0));
12477             break;
12478 #endif
12479         default:
12480             ret = -TARGET_ENOSYS;
12481         }
12482         if (!is_error(ret)) {
12483             int i;
12484             for (i = 0; i < ret; i++) {
12485                 target_ep[i].events = tswap32(ep[i].events);
12486                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12487             }
12488             unlock_user(target_ep, arg2,
12489                         ret * sizeof(struct target_epoll_event));
12490         } else {
12491             unlock_user(target_ep, arg2, 0);
12492         }
12493         g_free(ep);
12494         break;
12495     }
12496 #endif
12497 #endif
12498 #ifdef TARGET_NR_prlimit64
12499     case TARGET_NR_prlimit64:
12500     {
12501         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12502         struct target_rlimit64 *target_rnew, *target_rold;
12503         struct host_rlimit64 rnew, rold, *rnewp = 0;
12504         int resource = target_to_host_resource(arg2);
12505         if (arg3) {
12506             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12507                 goto efault;
12508             }
12509             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12510             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12511             unlock_user_struct(target_rnew, arg3, 0);
12512             rnewp = &rnew;
12513         }
12514 
12515         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12516         if (!is_error(ret) && arg4) {
12517             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12518                 goto efault;
12519             }
12520             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12521             target_rold->rlim_max = tswap64(rold.rlim_max);
12522             unlock_user_struct(target_rold, arg4, 1);
12523         }
12524         break;
12525     }
12526 #endif
12527 #ifdef TARGET_NR_gethostname
12528     case TARGET_NR_gethostname:
12529     {
12530         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12531         if (name) {
12532             ret = get_errno(gethostname(name, arg2));
12533             unlock_user(name, arg1, arg2);
12534         } else {
12535             ret = -TARGET_EFAULT;
12536         }
12537         break;
12538     }
12539 #endif
12540 #ifdef TARGET_NR_atomic_cmpxchg_32
12541     case TARGET_NR_atomic_cmpxchg_32:
12542     {
12543         /* should use start_exclusive from main.c */
12544         abi_ulong mem_value;
12545         if (get_user_u32(mem_value, arg6)) {
12546             target_siginfo_t info;
12547             info.si_signo = SIGSEGV;
12548             info.si_errno = 0;
12549             info.si_code = TARGET_SEGV_MAPERR;
12550             info._sifields._sigfault._addr = arg6;
12551             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12552                          QEMU_SI_FAULT, &info);
12553             ret = 0xdeadbeef;
12554 
12555         }
12556         if (mem_value == arg2)
12557             put_user_u32(arg1, arg6);
12558         ret = mem_value;
12559         break;
12560     }
12561 #endif
12562 #ifdef TARGET_NR_atomic_barrier
12563     case TARGET_NR_atomic_barrier:
12564     {
12565         /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12566         ret = 0;
12567         break;
12568     }
12569 #endif
12570 
12571 #ifdef TARGET_NR_timer_create
12572     case TARGET_NR_timer_create:
12573     {
12574         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12575 
12576         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12577 
12578         int clkid = arg1;
12579         int timer_index = next_free_host_timer();
12580 
12581         if (timer_index < 0) {
12582             ret = -TARGET_EAGAIN;
12583         } else {
12584             timer_t *phtimer = g_posix_timers  + timer_index;
12585 
12586             if (arg2) {
12587                 phost_sevp = &host_sevp;
12588                 ret = target_to_host_sigevent(phost_sevp, arg2);
12589                 if (ret != 0) {
12590                     break;
12591                 }
12592             }
12593 
12594             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12595             if (ret) {
12596                 phtimer = NULL;
12597             } else {
12598                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12599                     goto efault;
12600                 }
12601             }
12602         }
12603         break;
12604     }
12605 #endif
12606 
12607 #ifdef TARGET_NR_timer_settime
12608     case TARGET_NR_timer_settime:
12609     {
12610         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12611          * struct itimerspec * old_value */
12612         target_timer_t timerid = get_timer_id(arg1);
12613 
12614         if (timerid < 0) {
12615             ret = timerid;
12616         } else if (arg3 == 0) {
12617             ret = -TARGET_EINVAL;
12618         } else {
12619             timer_t htimer = g_posix_timers[timerid];
12620             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12621 
12622             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12623                 goto efault;
12624             }
12625             ret = get_errno(
12626                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12627             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12628                 goto efault;
12629             }
12630         }
12631         break;
12632     }
12633 #endif
12634 
12635 #ifdef TARGET_NR_timer_gettime
12636     case TARGET_NR_timer_gettime:
12637     {
12638         /* args: timer_t timerid, struct itimerspec *curr_value */
12639         target_timer_t timerid = get_timer_id(arg1);
12640 
12641         if (timerid < 0) {
12642             ret = timerid;
12643         } else if (!arg2) {
12644             ret = -TARGET_EFAULT;
12645         } else {
12646             timer_t htimer = g_posix_timers[timerid];
12647             struct itimerspec hspec;
12648             ret = get_errno(timer_gettime(htimer, &hspec));
12649 
12650             if (host_to_target_itimerspec(arg2, &hspec)) {
12651                 ret = -TARGET_EFAULT;
12652             }
12653         }
12654         break;
12655     }
12656 #endif
12657 
12658 #ifdef TARGET_NR_timer_getoverrun
12659     case TARGET_NR_timer_getoverrun:
12660     {
12661         /* args: timer_t timerid */
12662         target_timer_t timerid = get_timer_id(arg1);
12663 
12664         if (timerid < 0) {
12665             ret = timerid;
12666         } else {
12667             timer_t htimer = g_posix_timers[timerid];
12668             ret = get_errno(timer_getoverrun(htimer));
12669         }
12670         fd_trans_unregister(ret);
12671         break;
12672     }
12673 #endif
12674 
12675 #ifdef TARGET_NR_timer_delete
12676     case TARGET_NR_timer_delete:
12677     {
12678         /* args: timer_t timerid */
12679         target_timer_t timerid = get_timer_id(arg1);
12680 
12681         if (timerid < 0) {
12682             ret = timerid;
12683         } else {
12684             timer_t htimer = g_posix_timers[timerid];
12685             ret = get_errno(timer_delete(htimer));
12686             g_posix_timers[timerid] = 0;
12687         }
12688         break;
12689     }
12690 #endif
12691 
12692 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12693     case TARGET_NR_timerfd_create:
12694         ret = get_errno(timerfd_create(arg1,
12695                 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12696         break;
12697 #endif
12698 
12699 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12700     case TARGET_NR_timerfd_gettime:
12701         {
12702             struct itimerspec its_curr;
12703 
12704             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12705 
12706             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12707                 goto efault;
12708             }
12709         }
12710         break;
12711 #endif
12712 
12713 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12714     case TARGET_NR_timerfd_settime:
12715         {
12716             struct itimerspec its_new, its_old, *p_new;
12717 
12718             if (arg3) {
12719                 if (target_to_host_itimerspec(&its_new, arg3)) {
12720                     goto efault;
12721                 }
12722                 p_new = &its_new;
12723             } else {
12724                 p_new = NULL;
12725             }
12726 
12727             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12728 
12729             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12730                 goto efault;
12731             }
12732         }
12733         break;
12734 #endif
12735 
12736 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12737     case TARGET_NR_ioprio_get:
12738         ret = get_errno(ioprio_get(arg1, arg2));
12739         break;
12740 #endif
12741 
12742 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12743     case TARGET_NR_ioprio_set:
12744         ret = get_errno(ioprio_set(arg1, arg2, arg3));
12745         break;
12746 #endif
12747 
12748 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12749     case TARGET_NR_setns:
12750         ret = get_errno(setns(arg1, arg2));
12751         break;
12752 #endif
12753 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12754     case TARGET_NR_unshare:
12755         ret = get_errno(unshare(arg1));
12756         break;
12757 #endif
12758 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12759     case TARGET_NR_kcmp:
12760         ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12761         break;
12762 #endif
12763 
12764     default:
12765     unimplemented:
12766         gemu_log("qemu: Unsupported syscall: %d\n", num);
12767 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12768     unimplemented_nowarn:
12769 #endif
12770         ret = -TARGET_ENOSYS;
12771         break;
12772     }
12773 fail:
12774 #ifdef DEBUG
12775     gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12776 #endif
12777     if(do_strace)
12778         print_syscall_ret(num, ret);
12779     trace_guest_user_syscall_ret(cpu, num, ret);
12780     return ret;
12781 efault:
12782     ret = -TARGET_EFAULT;
12783     goto fail;
12784 }
12785