/* This is the Linux kernel elf-loading code, ported into user space */
#include "qemu/osdep.h"
#include <sys/param.h>

#include <sys/resource.h>
#include <sys/shm.h>

#include "qemu.h"
#include "disas/disas.h"
#include "qemu/path.h"
#include "qemu/queue.h"
#include "qemu/guest-random.h"

#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_HWCAP2
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

#define ELF_OSABI   ELFOSABI_SYSV

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
    FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
                                           descriptors (signal handling) */
    MMAP_PAGE_ZERO =    0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC = 0x0400000,
    ADDR_LIMIT_32BIT =  0x0800000,
    SHORT_INODE =       0x1000000,
    WHOLE_SECONDS =     0x2000000,
    STICKY_TIMEOUTS =   0x4000000,
    ADDR_LIMIT_3GB =    0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX =         0x0000,
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
    PER_BSD =           0x0006,
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32 =       0x0008,
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
    PER_RISCOS =        0x000c,
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
    PER_HPUX =          0x0010,
    PER_MASK =          0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)
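
/*
 * For example (illustrative values only):
 *   personality(PER_LINUX32_3GB) == PER_LINUX32
 *   personality(PER_SVR4)        == 0x0001
 * The bug-emulation flags in the top three bytes are masked away,
 * leaving only the low-byte personality type.
 */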

int info_is_fdpic(struct image_info *info)
{
    return info->personality == PER_LINUX_FDPIC;
}

/* this flag is ineffective under Linux too, should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif

#ifdef TARGET_ABI_MIPSN32
typedef abi_ullong      target_elf_greg_t;
#define tswapreg(ptr)   tswap64(ptr)
#else
typedef abi_ulong       target_elf_greg_t;
#define tswapreg(ptr)   tswapal(ptr)
#endif

#ifdef USE_UID16
typedef abi_ushort      target_uid_t;
typedef abi_ushort      target_gid_t;
#else
typedef abi_uint        target_uid_t;
typedef abi_uint        target_gid_t;
#endif
typedef abi_int         target_pid_t;

#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}
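
/*
 * Worked example of the mapping above (illustrative only): a CPU
 * reporting family 5 yields "i586", families of 6 or above are
 * clamped to "i686", and families below 3 keep the default "i386".
 */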

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}

#ifdef TARGET_X86_64
#define ELF_START_MMAP 0x2aaaaab000ULL

#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#define ELF_NREG    27
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29, as there should be room for
 * the TRAPNO and ERR "registers" as well, but Linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = env->regs[15];
    (*regs)[1] = env->regs[14];
    (*regs)[2] = env->regs[13];
    (*regs)[3] = env->regs[12];
    (*regs)[4] = env->regs[R_EBP];
    (*regs)[5] = env->regs[R_EBX];
    (*regs)[6] = env->regs[11];
    (*regs)[7] = env->regs[10];
    (*regs)[8] = env->regs[9];
    (*regs)[9] = env->regs[8];
    (*regs)[10] = env->regs[R_EAX];
    (*regs)[11] = env->regs[R_ECX];
    (*regs)[12] = env->regs[R_EDX];
    (*regs)[13] = env->regs[R_ESI];
    (*regs)[14] = env->regs[R_EDI];
    (*regs)[15] = env->regs[R_EAX]; /* XXX */
    (*regs)[16] = env->eip;
    (*regs)[17] = env->segs[R_CS].selector & 0xffff;
    (*regs)[18] = env->eflags;
    (*regs)[19] = env->regs[R_ESP];
    (*regs)[20] = env->segs[R_SS].selector & 0xffff;
    (*regs)[21] = env->segs[R_FS].selector & 0xffff;
    (*regs)[22] = env->segs[R_GS].selector & 0xffff;
    (*regs)[23] = env->segs[R_DS].selector & 0xffff;
    (*regs)[24] = env->segs[R_ES].selector & 0xffff;
    (*regs)[25] = env->segs[R_FS].selector & 0xffff;
    (*regs)[26] = env->segs[R_GS].selector & 0xffff;
}

#else

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_386

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us we have no such handler.  */
    regs->edx = 0;
}

#define ELF_NREG    17
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19, as there should be room for
 * the TRAPNO and ERR "registers" as well, but Linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = env->regs[R_EBX];
    (*regs)[1] = env->regs[R_ECX];
    (*regs)[2] = env->regs[R_EDX];
    (*regs)[3] = env->regs[R_ESI];
    (*regs)[4] = env->regs[R_EDI];
    (*regs)[5] = env->regs[R_EBP];
    (*regs)[6] = env->regs[R_EAX];
    (*regs)[7] = env->segs[R_DS].selector & 0xffff;
    (*regs)[8] = env->segs[R_ES].selector & 0xffff;
    (*regs)[9] = env->segs[R_FS].selector & 0xffff;
    (*regs)[10] = env->segs[R_GS].selector & 0xffff;
    (*regs)[11] = env->regs[R_EAX]; /* XXX */
    (*regs)[12] = env->eip;
    (*regs)[13] = env->segs[R_CS].selector & 0xffff;
    (*regs)[14] = env->eflags;
    (*regs)[15] = env->regs[R_ESP];
    (*regs)[16] = env->segs[R_SS].selector & 0xffff;
}
#endif

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_ARM

#ifndef TARGET_AARCH64
/* 32 bit ARM definitions */

#define ELF_START_MMAP 0x80000000

#define ELF_ARCH        EM_ARM
#define ELF_CLASS       ELFCLASS32

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->uregs[16] = ARM_CPU_MODE_USR;
    if (infop->entry & 1) {
        regs->uregs[16] |= CPSR_T;
    }
    regs->uregs[15] = infop->entry & 0xfffffffe;
    regs->uregs[13] = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->uregs[2], stack + 8); /* envp */
    get_user_ual(regs->uregs[1], stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed afterwards! */
    regs->uregs[0] = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->uregs[10] = infop->start_data;

    /* Support ARM FDPIC.  */
    if (info_is_fdpic(infop)) {
        /* As described in the ABI document, r7 points to the loadmap info
         * prepared by the kernel. If an interpreter is needed, r8 points
         * to the interpreter loadmap and r9 points to the interpreter
         * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and
         * r9 points to the main program PT_DYNAMIC info.
         */
        regs->uregs[7] = infop->loadmap_addr;
        if (infop->interpreter_loadmap_addr) {
            /* Executable is dynamically loaded.  */
            regs->uregs[8] = infop->interpreter_loadmap_addr;
            regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
        } else {
            regs->uregs[8] = 0;
            regs->uregs[9] = infop->pt_dynamic_addr;
        }
    }
}

#define ELF_NREG    18
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
{
    (*regs)[0] = tswapreg(env->regs[0]);
    (*regs)[1] = tswapreg(env->regs[1]);
    (*regs)[2] = tswapreg(env->regs[2]);
    (*regs)[3] = tswapreg(env->regs[3]);
    (*regs)[4] = tswapreg(env->regs[4]);
    (*regs)[5] = tswapreg(env->regs[5]);
    (*regs)[6] = tswapreg(env->regs[6]);
    (*regs)[7] = tswapreg(env->regs[7]);
    (*regs)[8] = tswapreg(env->regs[8]);
    (*regs)[9] = tswapreg(env->regs[9]);
    (*regs)[10] = tswapreg(env->regs[10]);
    (*regs)[11] = tswapreg(env->regs[11]);
    (*regs)[12] = tswapreg(env->regs[12]);
    (*regs)[13] = tswapreg(env->regs[13]);
    (*regs)[14] = tswapreg(env->regs[14]);
    (*regs)[15] = tswapreg(env->regs[15]);

    (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
    (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
    ARM_HWCAP_ARM_NEON      = 1 << 12,
    ARM_HWCAP_ARM_VFPv3     = 1 << 13,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
    ARM_HWCAP_ARM_TLS       = 1 << 15,
    ARM_HWCAP_ARM_VFPv4     = 1 << 16,
    ARM_HWCAP_ARM_IDIVA     = 1 << 17,
    ARM_HWCAP_ARM_IDIVT     = 1 << 18,
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
};

enum {
    ARM_HWCAP2_ARM_AES      = 1 << 0,
    ARM_HWCAP2_ARM_PMULL    = 1 << 1,
    ARM_HWCAP2_ARM_SHA1     = 1 << 2,
    ARM_HWCAP2_ARM_SHA2     = 1 << 3,
    ARM_HWCAP2_ARM_CRC32    = 1 << 4,
};

/* The commpage only exists for 32 bit kernels */

/* Return 1 if the proposed guest space is suitable for the guest.
 * Return 0 if the proposed guest space isn't suitable, but another
 * address space should be tried.
 * Return -1 if there is no way the proposed guest space can be
 * valid regardless of the base.
 * The guest code may leave a page mapped and populate it if the
 * address is suitable.
 */
static int init_guest_commpage(unsigned long guest_base,
                               unsigned long guest_size)
{
    unsigned long real_start, test_page_addr;

    /* We need to check that we can force a fault on access to the
     * commpage at 0xffff0fxx
     */
    test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask);

    /* If the commpage lies within the already allocated guest space,
     * then there is no way we can allocate it.
     *
     * You may be thinking that this check is redundant because
     * we already validated the guest size against MAX_RESERVED_VA;
     * but if qemu_host_page_mask is unusually large, then
     * test_page_addr may be lower.
     */
    if (test_page_addr >= guest_base
        && test_page_addr < (guest_base + guest_size)) {
        return -1;
    }

    /* Note it needs to be writeable to let us initialise it */
    real_start = (unsigned long)
                 mmap((void *)test_page_addr, qemu_host_page_size,
                     PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

    /* If we can't map it then try another address */
    if (real_start == -1ul) {
        return 0;
    }

    if (real_start != test_page_addr) {
        /* OS didn't put the page where we asked - unmap and reject */
        munmap((void *)real_start, qemu_host_page_size);
        return 0;
    }

    /* Leave the page mapped
     * Populate it (mmap should have left it all 0'd)
     */

    /* Kernel helper versions */
    __put_user(5, (uint32_t *)g2h(0xffff0ffcul));

    /* Now it's populated make it RO */
    if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) {
        perror("Protecting guest commpage");
        exit(-1);
    }

    return 1; /* All good */
}
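
/*
 * A minimal sketch of how a caller might drive the tri-state result
 * above (hypothetical loop, not the actual probing code):
 *
 *   for (base = start; base < end; base += align) {
 *       switch (init_guest_commpage(base, size)) {
 *       case 1:   return base;              // commpage mapped, base is good
 *       case 0:   continue;                 // this base failed, try the next
 *       case -1:  return (unsigned long)-1; // no base can ever work
 *       }
 *   }
 */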

#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_ARM_SWP;
    hwcaps |= ARM_HWCAP_ARM_HALF;
    hwcaps |= ARM_HWCAP_ARM_THUMB;
    hwcaps |= ARM_HWCAP_ARM_FAST_MULT;

    /* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
    GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
    GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
    GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
    GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
    GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
    GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
    GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
    GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
    GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
    GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
    /* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
     * Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
     * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
     * to our VFP_FP16 feature bit.
     */
    GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPD32);
    GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);

    return hwcaps;
}

static uint32_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
    return hwcaps;
}

#undef GET_FEATURE
#undef GET_FEATURE_ID

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    CPUARMState *env = thread_cpu->env_ptr;

#ifdef TARGET_WORDS_BIGENDIAN
# define END  "b"
#else
# define END  "l"
#endif

    if (arm_feature(env, ARM_FEATURE_V8)) {
        return "v8" END;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            return "v7m" END;
        } else {
            return "v7" END;
        }
    } else if (arm_feature(env, ARM_FEATURE_V6)) {
        return "v6" END;
    } else if (arm_feature(env, ARM_FEATURE_V5)) {
        return "v5" END;
    } else {
        return "v4" END;
    }

#undef END
}

#else
/* 64 bit ARM definitions */
#define ELF_START_MMAP 0x80000000

#define ELF_ARCH        EM_AARCH64
#define ELF_CLASS       ELFCLASS64
#ifdef TARGET_WORDS_BIGENDIAN
# define ELF_PLATFORM    "aarch64_be"
#else
# define ELF_PLATFORM    "aarch64"
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->pc = infop->entry & ~0x3ULL;
    regs->sp = stack;
}

#define ELF_NREG    34
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUARMState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(env->xregs[i]);
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_A64_FP            = 1 << 0,
    ARM_HWCAP_A64_ASIMD         = 1 << 1,
    ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
    ARM_HWCAP_A64_AES           = 1 << 3,
    ARM_HWCAP_A64_PMULL         = 1 << 4,
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
    ARM_HWCAP_A64_ASIMDFHM      = 1 << 23,
    ARM_HWCAP_A64_DIT           = 1 << 24,
    ARM_HWCAP_A64_USCAT         = 1 << 25,
    ARM_HWCAP_A64_ILRCPC        = 1 << 26,
    ARM_HWCAP_A64_FLAGM         = 1 << 27,
    ARM_HWCAP_A64_SSBS          = 1 << 28,
    ARM_HWCAP_A64_SB            = 1 << 29,
    ARM_HWCAP_A64_PACA          = 1 << 30,
    ARM_HWCAP_A64_PACG          = 1UL << 31,
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_A64_FP;
    hwcaps |= ARM_HWCAP_A64_ASIMD;
    hwcaps |= ARM_HWCAP_A64_CPUID;

    /* probe for the extra features */
#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
    GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
    GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
    GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
    GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
    GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);

#undef GET_FEATURE_ID

    return hwcaps;
}

#endif /* not TARGET_AARCH64 */
#endif /* TARGET_ARM */

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_SPARCV9

#define STACK_BIAS              2047

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
#ifndef TARGET_ABI32
    regs->tstate = 0;
#endif
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
#endif
}

#else
#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_SPARC

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->psr = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif

#ifdef TARGET_PPC

#define ELF_MACHINE    PPC_ELF_MACHINE
#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define ELF_CLASS       ELFCLASS32

#endif

#define ELF_ARCH        EM_PPC

/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.  */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,

    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,

    /* Feature definitions in AT_HWCAP2.  */
    QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
    QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
    QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
    QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
    QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
    QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
    QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
    QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
    QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
    QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
    QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
    QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
    QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flags, feature) \
    do { \
        if ((cpu->env.insns_flags2 & flags) == flags) { \
            features |= feature; \
        } \
    } while (0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
    GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
    GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
    GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
                  PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
                  QEMU_PPC_FEATURE_ARCH_2_06);
#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap2(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flag, feature)                                      \
    do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)

    GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
    GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
    GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                  PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
                  QEMU_PPC_FEATURE2_VEC_CRYPTO);
    GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
                 QEMU_PPC_FEATURE2_DARN);

#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
    do {                                                \
        PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);              \
        /*                                              \
         * Handle glibc compatibility: these magic entries must \
         * be at the lowest addresses in the final auxv.        \
         */                                             \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \
        NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
    } while (0)
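
/*
 * Why AT_IGNOREPPC appears twice: each NEW_AUX_ENT() pushes a pair of
 * abi_ulongs, so on a 32-bit target one entry occupies 8 bytes.
 * Emitting the pair twice guarantees that, whichever 8-byte half of a
 * 16-byte block the auxv starts in, the first 16-byte-aligned word
 * holds AT_IGNOREPPC (22), which satisfies the "greater than 16"
 * requirement described above.  (Illustrative reasoning; the constant
 * comes from the AT_IGNOREPPC definition in elf.h.)
 */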

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    if (get_ppc64_abi(infop) < 2) {
        uint64_t val;
        get_user_u64(val, infop->entry + 8);
        _regs->gpr[2] = val + infop->load_bias;
        get_user_u64(val, infop->entry);
        infop->entry = val + infop->load_bias;
    } else {
        _regs->gpr[12] = infop->entry;  /* r12 set to global entry address */
    }
#endif
    _regs->nip = infop->entry;
}

/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
#define ELF_NREG 48
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapreg(env->gpr[i]);
    }

    (*regs)[32] = tswapreg(env->nip);
    (*regs)[33] = tswapreg(env->msr);
    (*regs)[35] = tswapreg(env->ctr);
    (*regs)[36] = tswapreg(env->lr);
    (*regs)[37] = tswapreg(env->xer);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    (*regs)[38] = tswapreg(ccr);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#define ELF_ARCH    EM_MIPS

#define elf_check_arch(x) ((x) == EM_MIPS || (x) == EM_NANOMIPS)

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

/* See linux kernel: arch/mips/include/asm/elf.h.  */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/include/asm/reg.h.  */
enum {
#ifdef TARGET_MIPS64
    TARGET_EF_R0 = 0,
#else
    TARGET_EF_R0 = 6,
#endif
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
    TARGET_EF_LO = TARGET_EF_R0 + 32,
    TARGET_EF_HI = TARGET_EF_R0 + 33,
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
};

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
{
    int i;

    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
    }

    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

/* See arch/mips/include/uapi/asm/hwcap.h.  */
enum {
    HWCAP_MIPS_R6           = (1 << 0),
    HWCAP_MIPS_MSA          = (1 << 1),
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);
    uint32_t hwcaps = 0;

#define GET_FEATURE(flag, hwcap) \
    do { if (cpu->env.insn_flags & (flag)) { hwcaps |= hwcap; } } while (0)

    GET_FEATURE(ISA_MIPS32R6 | ISA_MIPS64R6, HWCAP_MIPS_R6);
    GET_FEATURE(ASE_MSA, HWCAP_MIPS_MSA);

#undef GET_FEATURE

    return hwcaps;
}

#endif /* TARGET_MIPS */

#ifdef TARGET_MICROBLAZE

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_MICROBLAZE

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;

}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 38
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
{
    int i, pos = 0;

    for (i = 0; i < 32; i++) {
        (*regs)[pos++] = tswapreg(env->regs[i]);
    }

    for (i = 0; i < 6; i++) {
        (*regs)[pos++] = tswapreg(env->sregs[i]);
    }
}

#endif /* TARGET_MICROBLAZE */

#ifdef TARGET_NIOS2

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_ALTERA_NIOS2

static void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->ea = infop->entry;
    regs->sp = infop->start_stack;
    regs->estatus = 0x3;
}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 49
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUNios2State *env)
{
    int i;

    (*regs)[0] = -1;
    for (i = 1; i < 8; i++)    /* r0-r7 */
        (*regs)[i] = tswapreg(env->regs[i + 7]);

    for (i = 8; i < 16; i++)   /* r8-r15 */
        (*regs)[i] = tswapreg(env->regs[i - 8]);

    for (i = 16; i < 24; i++)  /* r16-r23 */
        (*regs)[i] = tswapreg(env->regs[i + 7]);
    (*regs)[24] = -1;    /* R_ET */
    (*regs)[25] = -1;    /* R_BT */
    (*regs)[26] = tswapreg(env->regs[R_GP]);
    (*regs)[27] = tswapreg(env->regs[R_SP]);
    (*regs)[28] = tswapreg(env->regs[R_FP]);
    (*regs)[29] = tswapreg(env->regs[R_EA]);
    (*regs)[30] = -1;    /* R_SSTATUS */
    (*regs)[31] = tswapreg(env->regs[R_RA]);

    (*regs)[32] = tswapreg(env->regs[R_PC]);

    (*regs)[33] = -1; /* R_STATUS */
    (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]);

    for (i = 35; i < 49; i++)    /* ... */
        (*regs)[i] = -1;
}

#endif /* TARGET_NIOS2 */

#ifdef TARGET_OPENRISC

#define ELF_START_MMAP 0x08000000

#define ELF_ARCH EM_OPENRISC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2MSB

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->gpr[1] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

/* See linux kernel arch/openrisc/include/asm/elf.h.  */
#define ELF_NREG 34 /* gprs and pc, sr */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUOpenRISCState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(cpu_get_gpr(env, i));
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(cpu_get_sr(env));
}
#define ELF_HWCAP 0
#define ELF_PLATFORM NULL

#endif /* TARGET_OPENRISC */

#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_SH

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

/* See linux kernel: arch/sh/include/asm/elf.h.  */
#define ELF_NREG 23
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/sh/include/asm/ptrace.h.  */
enum {
    TARGET_REG_PC = 16,
    TARGET_REG_PR = 17,
    TARGET_REG_SR = 18,
    TARGET_REG_GBR = 19,
    TARGET_REG_MACH = 20,
    TARGET_REG_MACL = 21,
    TARGET_REG_SYSCALL = 22
};

static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
                                      const CPUSH4State *env)
{
    int i;

    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapreg(env->gregs[i]);
    }

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
    (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
    (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
    (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
    (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
    (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

enum {
    SH_CPU_HAS_FPU            = 0x0001, /* Hardware FPU support */
    SH_CPU_HAS_P2_FLUSH_BUG   = 0x0002, /* Need to flush the cache in P2 area */
    SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
    SH_CPU_HAS_DSP            = 0x0008, /* SH-DSP: DSP support */
    SH_CPU_HAS_PERF_COUNTER   = 0x0010, /* Hardware performance counters */
    SH_CPU_HAS_PTEA           = 0x0020, /* PTEA register */
    SH_CPU_HAS_LLSC           = 0x0040, /* movli.l/movco.l */
    SH_CPU_HAS_L2_CACHE       = 0x0080, /* Secondary cache / URAM */
    SH_CPU_HAS_OP32           = 0x0100, /* 32-bit instruction support */
    SH_CPU_HAS_PTEAEX         = 0x0200, /* PTE ASID Extension support */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    SuperHCPU *cpu = SUPERH_CPU(thread_cpu);
    uint32_t hwcap = 0;

    hwcap |= SH_CPU_HAS_FPU;

    if (cpu->env.features & SH_FEATURE_SH4A) {
        hwcap |= SH_CPU_HAS_LLSC;
    }

    return hwcap;
}

#endif

#ifdef TARGET_CRIS

#define ELF_START_MMAP 0x80000000

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_CRIS

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define ELF_EXEC_PAGESIZE        8192

#endif

#ifdef TARGET_M68K

#define ELF_START_MMAP 0x80000000

#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->sr = 0;
    regs->pc = infop->entry;
}

/* See linux kernel: arch/m68k/include/asm/elf.h.  */
#define ELF_NREG 20
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
{
    (*regs)[0] = tswapreg(env->dregs[1]);
    (*regs)[1] = tswapreg(env->dregs[2]);
    (*regs)[2] = tswapreg(env->dregs[3]);
    (*regs)[3] = tswapreg(env->dregs[4]);
    (*regs)[4] = tswapreg(env->dregs[5]);
    (*regs)[5] = tswapreg(env->dregs[6]);
    (*regs)[6] = tswapreg(env->dregs[7]);
    (*regs)[7] = tswapreg(env->aregs[0]);
    (*regs)[8] = tswapreg(env->aregs[1]);
    (*regs)[9] = tswapreg(env->aregs[2]);
    (*regs)[10] = tswapreg(env->aregs[3]);
    (*regs)[11] = tswapreg(env->aregs[4]);
    (*regs)[12] = tswapreg(env->aregs[5]);
    (*regs)[13] = tswapreg(env->aregs[6]);
    (*regs)[14] = tswapreg(env->dregs[0]);
    (*regs)[15] = tswapreg(env->aregs[7]);
    (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
    (*regs)[17] = tswapreg(env->sr);
    (*regs)[18] = tswapreg(env->pc);
    (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       8192

#endif

#ifdef TARGET_ALPHA

#define ELF_START_MMAP (0x30000000000ULL)

#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->ps = 8;
    regs->usp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_ALPHA */

#ifdef TARGET_S390X

#define ELF_START_MMAP (0x20000000000ULL)

#define ELF_CLASS       ELFCLASS64
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_S390

#include "elf.h"

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE(_feat, _hwcap) \
    do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)

static uint32_t get_elf_hwcap(void)
{
    /*
     * Let's assume we always have esan3 and zarch.
     * 31-bit processes can use 64-bit registers (high gprs).
     */
    uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS;

    GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE);
    GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA);
    GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP);
    GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM);
    if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) &&
        s390_has_feat(S390_FEAT_ETF3_ENH)) {
        hwcap |= HWCAP_S390_ETF3EH;
    }
    GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS);

    return hwcap;
}

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psw.addr = infop->entry;
    regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
    regs->gprs[15] = infop->start_stack;
}

#endif /* TARGET_S390X */

#ifdef TARGET_TILEGX

/* 42 bits of real address space are used, half of it for user mode */
#define ELF_START_MMAP (0x00000020000000000ULL)

#define elf_check_arch(x) ((x) == EM_TILEGX)

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_TILEGX

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->sp = infop->start_stack;

}

#define ELF_EXEC_PAGESIZE        65536 /* TILE-Gx page size is 64KB */

#endif /* TARGET_TILEGX */

#ifdef TARGET_RISCV

#define ELF_START_MMAP 0x80000000
#define ELF_ARCH  EM_RISCV

#ifdef TARGET_RISCV32
#define ELF_CLASS ELFCLASS32
#else
#define ELF_CLASS ELFCLASS64
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->sepc = infop->entry;
    regs->sp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_RISCV */

#ifdef TARGET_HPPA

#define ELF_START_MMAP  0x80000000
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_PARISC
#define ELF_PLATFORM    "PARISC"
#define STACK_GROWS_DOWN 0
#define STACK_ALIGNMENT  64

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->iaoq[0] = infop->entry;
    regs->iaoq[1] = infop->entry + 4;
    regs->gr[23] = 0;
    regs->gr[24] = infop->arg_start;
    regs->gr[25] = (infop->arg_end - infop->arg_start) / sizeof(abi_ulong);
    /* The top-of-stack contains a linkage buffer.  */
    regs->gr[30] = infop->start_stack + 64;
    regs->gr[31] = infop->entry;
}

#endif /* TARGET_HPPA */

#ifdef TARGET_XTENSA

#define ELF_START_MMAP 0x20000000

#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_XTENSA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->windowbase = 0;
    regs->windowstart = 1;
    regs->areg[1] = infop->start_stack;
    regs->pc = infop->entry;
}

/* See linux kernel: arch/xtensa/include/asm/elf.h.  */
#define ELF_NREG 128
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_REG_PC,
    TARGET_REG_PS,
    TARGET_REG_LBEG,
    TARGET_REG_LEND,
    TARGET_REG_LCOUNT,
    TARGET_REG_SAR,
    TARGET_REG_WINDOWSTART,
    TARGET_REG_WINDOWBASE,
    TARGET_REG_THREADPTR,
    TARGET_REG_AR0 = 64,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUXtensaState *env)
{
    unsigned i;

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM);
    (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]);
    (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]);
    (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]);
    (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]);
    (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]);
    (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]);
    (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]);
    xtensa_sync_phys_from_window((CPUXtensaState *)env);
    for (i = 0; i < env->config->nareg; ++i) {
        (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]);
    }
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif /* TARGET_XTENSA */

#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifndef ELF_MACHINE
#define ELF_MACHINE ELF_ARCH
#endif

#ifndef elf_check_arch
#define elf_check_arch(x) ((x) == ELF_ARCH)
#endif

#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif

#ifndef STACK_GROWS_DOWN
#define STACK_GROWS_DOWN 1
#endif

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT 16
#endif

#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

#include "elf.h"

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};


#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314
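
/*
 * These are the classic octal a.out magic numbers; N_MAGIC() extracts
 * them from the low 16 bits of a_info.  A hypothetical check might be:
 *
 *   struct exec ex;
 *   if (N_MAGIC(ex) == ZMAGIC) {
 *       ... treat as a demand-paged a.out executable ...
 *   }
 */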

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE \
        (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \
         TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE))
#define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE)
#define TARGET_ELF_PAGESTART(_v) ((_v) & \
                                 ~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
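
/*
 * Worked example (assuming an effective ELF page size of 0x1000):
 *   TARGET_ELF_PAGESTART(0x12345)  == 0x12000
 *   TARGET_ELF_PAGEOFFSET(0x12345) == 0x345
 *   TARGET_ELF_PAGELENGTH(0x12345) == 0x13000
 */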

#define DLINFO_ITEMS 15

static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr, int phnum)
{
    int i;
    for (i = 0; i < phnum; ++i, ++phdr) {
        bswap32s(&phdr->p_type);        /* Segment type */
        bswap32s(&phdr->p_flags);       /* Segment flags */
        bswaptls(&phdr->p_offset);      /* Segment file offset */
        bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
        bswaptls(&phdr->p_paddr);       /* Segment physical address */
        bswaptls(&phdr->p_filesz);      /* Segment size in file */
        bswaptls(&phdr->p_memsz);       /* Segment size in memory */
        bswaptls(&phdr->p_align);       /* Segment alignment */
    }
}

static void bswap_shdr(struct elf_shdr *shdr, int shnum)
{
    int i;
    for (i = 0; i < shnum; ++i, ++shdr) {
        bswap32s(&shdr->sh_name);
        bswap32s(&shdr->sh_type);
        bswaptls(&shdr->sh_flags);
        bswaptls(&shdr->sh_addr);
        bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}

#ifdef TARGET_MIPS
static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
{
    bswap16s(&abiflags->version);
    bswap32s(&abiflags->ases);
    bswap32s(&abiflags->isa_ext);
    bswap32s(&abiflags->flags1);
    bswap32s(&abiflags->flags2);
}
#endif
#else
static inline void bswap_ehdr(struct elfhdr *ehdr) { }
static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static inline void bswap_sym(struct elf_sym *sym) { }
#ifdef TARGET_MIPS
static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
#endif
#endif

#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUArchState *);
#endif /* USE_ELF_CORE_DUMP */
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);

/* Verify the portions of EHDR within E_IDENT for the target.
   This can be performed before bswapping the entire header.  */
static bool elf_check_ident(struct elfhdr *ehdr)
{
    return (ehdr->e_ident[EI_MAG0] == ELFMAG0
            && ehdr->e_ident[EI_MAG1] == ELFMAG1
            && ehdr->e_ident[EI_MAG2] == ELFMAG2
            && ehdr->e_ident[EI_MAG3] == ELFMAG3
            && ehdr->e_ident[EI_CLASS] == ELF_CLASS
            && ehdr->e_ident[EI_DATA] == ELF_DATA
            && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
}

/* Verify the portions of EHDR outside of E_IDENT for the target.
   This has to wait until after bswapping the header.  */
static bool elf_check_ehdr(struct elfhdr *ehdr)
{
    return (elf_check_arch(ehdr->e_machine)
            && ehdr->e_ehsize == sizeof(struct elfhdr)
            && ehdr->e_phentsize == sizeof(struct elf_phdr)
            && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
}

1658 /*
1659  * 'copy_elf_strings()' copies argument/environment strings from user
1660  * memory to free pages in kernel memory. These are in a format ready
1661  * to be put directly into the top of new user memory.
1662  *
1663  */
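/*
 * Illustrative call pattern (a sketch; the real call sites are in
 * load_elf_binary below): with a downward-growing stack the caller
 * copies the environment strings first and the argument strings
 * second, e.g.
 *
 *     p = copy_elf_strings(bprm->envc, bprm->envp, scratch, p, limit);
 *     p = copy_elf_strings(bprm->argc, bprm->argv, scratch, p, limit);
 *
 * so the argv strings land below the envp strings and P always tracks
 * the lowest byte written so far.
 */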
1664 static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
1665                                   abi_ulong p, abi_ulong stack_limit)
1666 {
1667     char *tmp;
1668     int len, i;
1669     abi_ulong top = p;
1670 
1671     if (!p) {
1672         return 0;       /* bullet-proofing */
1673     }
1674 
1675     if (STACK_GROWS_DOWN) {
1676         int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
1677         for (i = argc - 1; i >= 0; --i) {
1678             tmp = argv[i];
1679             if (!tmp) {
1680                 fprintf(stderr, "VFS: argc is wrong\n");
1681                 exit(-1);
1682             }
1683             len = strlen(tmp) + 1;
1684             tmp += len;
1685 
1686             if (len > (p - stack_limit)) {
1687                 return 0;
1688             }
1689             while (len) {
1690                 int bytes_to_copy = (len > offset) ? offset : len;
1691                 tmp -= bytes_to_copy;
1692                 p -= bytes_to_copy;
1693                 offset -= bytes_to_copy;
1694                 len -= bytes_to_copy;
1695 
1696                 memcpy_fromfs(scratch + offset, tmp, bytes_to_copy);
1697 
1698                 if (offset == 0) {
1699                     memcpy_to_target(p, scratch, top - p);
1700                     top = p;
1701                     offset = TARGET_PAGE_SIZE;
1702                 }
1703             }
1704         }
1705         if (p != top) {
1706             memcpy_to_target(p, scratch + offset, top - p);
1707         }
1708     } else {
1709         int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE);
1710         for (i = 0; i < argc; ++i) {
1711             tmp = argv[i];
1712             if (!tmp) {
1713                 fprintf(stderr, "VFS: argc is wrong\n");
1714                 exit(-1);
1715             }
1716             len = strlen(tmp) + 1;
1717             if (len > (stack_limit - p)) {
1718                 return 0;
1719             }
1720             while (len) {
1721                 int bytes_to_copy = (len > remaining) ? remaining : len;
1722 
1723                 memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy);
1724 
1725                 tmp += bytes_to_copy;
1726                 remaining -= bytes_to_copy;
1727                 p += bytes_to_copy;
1728                 len -= bytes_to_copy;
1729 
1730                 if (remaining == 0) {
1731                     memcpy_to_target(top, scratch, p - top);
1732                     top = p;
1733                     remaining = TARGET_PAGE_SIZE;
1734                 }
1735             }
1736         }
1737         if (p != top) {
1738             memcpy_to_target(top, scratch, p - top);
1739         }
1740     }
1741 
1742     return p;
1743 }
1744 
1745 /* Older Linux kernels provide up to MAX_ARG_PAGES (default: 32) of
1746  * argument/environment space. Newer kernels (>2.6.33) allow more,
1747  * dependent on stack size, but guarantee at least 32 pages for
1748  * backwards compatibility.
1749  */
1750 #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE)
1751 
1752 static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
1753                                  struct image_info *info)
1754 {
1755     abi_ulong size, error, guard;
1756 
1757     size = guest_stack_size;
1758     if (size < STACK_LOWER_LIMIT) {
1759         size = STACK_LOWER_LIMIT;
1760     }
1761     guard = TARGET_PAGE_SIZE;
1762     if (guard < qemu_real_host_page_size) {
1763         guard = qemu_real_host_page_size;
1764     }
1765 
1766     error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
1767                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1768     if (error == -1) {
1769         perror("mmap stack");
1770         exit(-1);
1771     }
1772 
1773     /* We reserve one extra page at the top of the stack as guard.  */
1774     if (STACK_GROWS_DOWN) {
1775         target_mprotect(error, guard, PROT_NONE);
1776         info->stack_limit = error + guard;
1777         return info->stack_limit + size - sizeof(void *);
1778     } else {
1779         target_mprotect(error + size, guard, PROT_NONE);
1780         info->stack_limit = error + size;
1781         return error;
1782     }
1783 }
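
/*
 * Sketch of the resulting layout for STACK_GROWS_DOWN (low addresses
 * on the left; the function returns an address just below the top):
 *
 *   error             error + guard                error + guard + size
 *     |  PROT_NONE guard  |  usable stack, growing down from sp  |
 *
 * For STACK_GROWS_UP the guard page sits above the stack instead.
 */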
1784 
1785 /* Map and zero the bss.  We need to explicitly zero any fractional pages
1786    after the data section (i.e. bss).  */
1787 static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
1788 {
1789     uintptr_t host_start, host_map_start, host_end;
1790 
1791     last_bss = TARGET_PAGE_ALIGN(last_bss);
1792 
1793     /* ??? There is confusion between qemu_real_host_page_size and
1794        qemu_host_page_size here and elsewhere in target_mmap, which
1795        may lead to the end of the data section mapping from the file
1796        not being mapped.  At least there was an explicit test and
1797        comment for that here, suggesting that "the file size must
1798        be known".  The comment probably pre-dates the introduction
1799        of the fstat system call in target_mmap which does in fact
1800        find out the size.  What isn't clear is if the workaround
1801        here is still actually needed.  For now, continue with it,
1802        but merge it with the "normal" mmap that would allocate the bss.  */
1803 
1804     host_start = (uintptr_t) g2h(elf_bss);
1805     host_end = (uintptr_t) g2h(last_bss);
1806     host_map_start = REAL_HOST_PAGE_ALIGN(host_start);
1807 
1808     if (host_map_start < host_end) {
1809         void *p = mmap((void *)host_map_start, host_end - host_map_start,
1810                        prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1811         if (p == MAP_FAILED) {
1812             perror("cannot mmap brk");
1813             exit(-1);
1814         }
1815     }
1816 
1817     /* Ensure that the bss page(s) are valid */
1818     if ((page_get_flags(last_bss-1) & prot) != prot) {
1819         page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
1820     }
1821 
1822     if (host_start < host_map_start) {
1823         memset((void *)host_start, 0, host_map_start - host_start);
1824     }
1825 }
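
/*
 * Pictorially, with '|' marking the first host page boundary at or
 * above elf_bss (i.e. host_map_start):
 *
 *      elf_bss          host_map_start           last_bss
 *         v                    |                     v
 *   ...data][ memset to zero   |   anonymous mmap    ]
 *
 * The fractional tail of the last file-backed host page is cleared by
 * hand; everything from host_map_start up to last_bss comes from a
 * fresh anonymous mapping and is therefore already zero.
 */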
1826 
1827 #ifdef TARGET_ARM
1828 static int elf_is_fdpic(struct elfhdr *exec)
1829 {
1830     return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
1831 }
1832 #else
1833 /* Default implementation, always false.  */
1834 static int elf_is_fdpic(struct elfhdr *exec)
1835 {
1836     return 0;
1837 }
1838 #endif
1839 
1840 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
1841 {
1842     uint16_t n;
1843     struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
1844 
1845     /* elf32_fdpic_loadseg */
1846     n = info->nsegs;
1847     while (n--) {
1848         sp -= 12;
1849         put_user_u32(loadsegs[n].addr, sp+0);
1850         put_user_u32(loadsegs[n].p_vaddr, sp+4);
1851         put_user_u32(loadsegs[n].p_memsz, sp+8);
1852     }
1853 
1854     /* elf32_fdpic_loadmap */
1855     sp -= 4;
1856     put_user_u16(0, sp+0); /* version */
1857     put_user_u16(info->nsegs, sp+2); /* nsegs */
1858 
1859     info->personality = PER_LINUX_FDPIC;
1860     info->loadmap_addr = sp;
1861 
1862     return sp;
1863 }
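
/*
 * The guest stack now holds, at the returned SP (matching the kernel's
 * struct elf32_fdpic_loadmap followed by its loadseg array):
 *
 *   sp + 0 : uint16_t version   (0)
 *   sp + 2 : uint16_t nsegs
 *   sp + 4 : { uint32_t addr; uint32_t p_vaddr; uint32_t p_memsz; }
 *            ... one such 12-byte record per load segment.
 */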
1864 
1865 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1866                                    struct elfhdr *exec,
1867                                    struct image_info *info,
1868                                    struct image_info *interp_info)
1869 {
1870     abi_ulong sp;
1871     abi_ulong u_argc, u_argv, u_envp, u_auxv;
1872     int size;
1873     int i;
1874     abi_ulong u_rand_bytes;
1875     uint8_t k_rand_bytes[16];
1876     abi_ulong u_platform;
1877     const char *k_platform;
1878     const int n = sizeof(elf_addr_t);
1879 
1880     sp = p;
1881 
1882     /* Needs to be before we load the env/argc/... */
1883     if (elf_is_fdpic(exec)) {
1884         /* Need 4 byte alignment for these structs */
1885         sp &= ~3;
1886         sp = loader_build_fdpic_loadmap(info, sp);
1887         info->other_info = interp_info;
1888         if (interp_info) {
1889             interp_info->other_info = info;
1890             sp = loader_build_fdpic_loadmap(interp_info, sp);
1891             info->interpreter_loadmap_addr = interp_info->loadmap_addr;
1892             info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr;
1893         } else {
1894             info->interpreter_loadmap_addr = 0;
1895             info->interpreter_pt_dynamic_addr = 0;
1896         }
1897     }
1898 
1899     u_platform = 0;
1900     k_platform = ELF_PLATFORM;
1901     if (k_platform) {
1902         size_t len = strlen(k_platform) + 1;
1903         if (STACK_GROWS_DOWN) {
1904             sp -= (len + n - 1) & ~(n - 1);
1905             u_platform = sp;
1906             /* FIXME - check return value of memcpy_to_target() for failure */
1907             memcpy_to_target(sp, k_platform, len);
1908         } else {
1909             memcpy_to_target(sp, k_platform, len);
1910             u_platform = sp;
1911             sp += len + 1;
1912         }
1913     }
1914 
1915     /* Provide 16 byte alignment for the PRNG, and basic alignment for
1916      * the argv and envp pointers.
1917      */
1918     if (STACK_GROWS_DOWN) {
1919         sp = QEMU_ALIGN_DOWN(sp, 16);
1920     } else {
1921         sp = QEMU_ALIGN_UP(sp, 16);
1922     }
1923 
1924     /*
1925      * Generate 16 random bytes for userspace PRNG seeding.
1926      */
1927     qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes));
1928     if (STACK_GROWS_DOWN) {
1929         sp -= 16;
1930         u_rand_bytes = sp;
1931         /* FIXME - check return value of memcpy_to_target() for failure */
1932         memcpy_to_target(sp, k_rand_bytes, 16);
1933     } else {
1934         memcpy_to_target(sp, k_rand_bytes, 16);
1935         u_rand_bytes = sp;
1936         sp += 16;
1937     }
1938 
1939     size = (DLINFO_ITEMS + 1) * 2;
1940     if (k_platform)
1941         size += 2;
1942 #ifdef DLINFO_ARCH_ITEMS
1943     size += DLINFO_ARCH_ITEMS * 2;
1944 #endif
1945 #ifdef ELF_HWCAP2
1946     size += 2;
1947 #endif
1948     info->auxv_len = size * n;
1949 
1950     size += envc + argc + 2;
1951     size += 1;  /* argc itself */
1952     size *= n;
1953 
1954     /* Allocate space and finalize stack alignment for entry now.  */
1955     if (STACK_GROWS_DOWN) {
1956         u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT);
1957         sp = u_argc;
1958     } else {
1959         u_argc = sp;
1960         sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT);
1961     }
1962 
1963     u_argv = u_argc + n;
1964     u_envp = u_argv + (argc + 1) * n;
1965     u_auxv = u_envp + (envc + 1) * n;
1966     info->saved_auxv = u_auxv;
1967     info->arg_start = u_argv;
1968     info->arg_end = u_argv + argc * n;
1969 
1970     /* This is correct because Linux defines
1971      * elf_addr_t as Elf32_Off / Elf64_Off
1972      */
1973 #define NEW_AUX_ENT(id, val) do {               \
1974         put_user_ual(id, u_auxv);  u_auxv += n; \
1975         put_user_ual(val, u_auxv); u_auxv += n; \
1976     } while (0)
1977 
1978 #ifdef ARCH_DLINFO
1979     /*
1980      * ARCH_DLINFO must come first so platform specific code can enforce
1981      * special alignment requirements on the AUXV if necessary (eg. PPC).
1982      */
1983     ARCH_DLINFO;
1984 #endif
1985     /* There must be exactly DLINFO_ITEMS entries here, or the assert
1986      * on info->auxv_len will trigger.
1987      */
1988     NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
1989     NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1990     NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1991     if ((info->alignment & ~qemu_host_page_mask) != 0) {
1992         /* Target doesn't support host page size alignment */
1993         NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1994     } else {
1995         NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE,
1996                                                qemu_host_page_size)));
1997     }
1998     NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
1999     NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
2000     NEW_AUX_ENT(AT_ENTRY, info->entry);
2001     NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
2002     NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
2003     NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
2004     NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
2005     NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
2006     NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
2007     NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
2008     NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE));
2009 
2010 #ifdef ELF_HWCAP2
2011     NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
2012 #endif
2013 
2014     if (u_platform) {
2015         NEW_AUX_ENT(AT_PLATFORM, u_platform);
2016     }
2017     NEW_AUX_ENT(AT_NULL, 0);
2018 #undef NEW_AUX_ENT
2019 
2020     /* Check that our initial calculation of the auxv length matches how much
2021      * we actually put into it.
2022      */
2023     assert(info->auxv_len == u_auxv - info->saved_auxv);
2024 
2025     put_user_ual(argc, u_argc);
2026 
2027     p = info->arg_strings;
2028     for (i = 0; i < argc; ++i) {
2029         put_user_ual(p, u_argv);
2030         u_argv += n;
2031         p += target_strlen(p) + 1;
2032     }
2033     put_user_ual(0, u_argv);
2034 
2035     p = info->env_strings;
2036     for (i = 0; i < envc; ++i) {
2037         put_user_ual(p, u_envp);
2038         u_envp += n;
2039         p += target_strlen(p) + 1;
2040     }
2041     put_user_ual(0, u_envp);
2042 
2043     return sp;
2044 }
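
/*
 * For reference, the block assembled above is laid out like this (each
 * slot is n == sizeof(elf_addr_t) bytes; with STACK_GROWS_DOWN the
 * returned sp points at argc):
 *
 *   sp ->  argc
 *          argv[0] ... argv[argc - 1], NULL
 *          envp[0] ... envp[envc - 1], NULL
 *          auxv (AT_* id, value) pairs, terminated by AT_NULL
 */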
2045 
2046 unsigned long init_guest_space(unsigned long host_start,
2047                                unsigned long host_size,
2048                                unsigned long guest_start,
2049                                bool fixed)
2050 {
2051     /* In order to use host shmat, we must be able to honor SHMLBA.  */
2052     unsigned long align = MAX(SHMLBA, qemu_host_page_size);
2053     unsigned long current_start, aligned_start;
2054     int flags;
2055 
2056     assert(host_start || host_size);
2057 
2058     /* If just a starting address is given, then simply verify that
2059      * address.  */
2060     if (host_start && !host_size) {
2061 #if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
2062         if (init_guest_commpage(host_start, host_size) != 1) {
2063             return (unsigned long)-1;
2064         }
2065 #endif
2066         return host_start;
2067     }
2068 
2069     /* Setup the initial flags and start address.  */
2070     current_start = host_start & -align;
2071     flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
2072     if (fixed) {
2073         flags |= MAP_FIXED;
2074     }
2075 
2076     /* Otherwise, a non-zero size region of memory needs to be mapped
2077      * and validated.  */
2078 
2079 #if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
2080     /* On 32-bit ARM, we need to map not just the usable memory, but
2081      * also the commpage.  Try to find a suitable place by allocating
2082      * a big chunk for all of it.  If host_start, then the naive
2083      * strategy is probably good enough.
2084      */
2085     if (!host_start) {
2086         unsigned long guest_full_size, host_full_size, real_start;
2087 
2088         guest_full_size =
2089             (0xffff0f00 & qemu_host_page_mask) + qemu_host_page_size;
2090         host_full_size = guest_full_size - guest_start;
2091         real_start = (unsigned long)
2092             mmap(NULL, host_full_size, PROT_NONE, flags, -1, 0);
2093         if (real_start == (unsigned long)-1) {
2094             if (host_size < host_full_size - qemu_host_page_size) {
2095                 /* We failed to map a contiguous segment, but we're
2096                  * allowed to have a gap between the usable memory and
2097                  * the commpage where other things can be mapped.
2098                  * This sparseness gives us more flexibility to find
2099                  * an address range.
2100                  */
2101                 goto naive;
2102             }
2103             return (unsigned long)-1;
2104         }
2105         munmap((void *)real_start, host_full_size);
2106         if (real_start & (align - 1)) {
2107             /* The same thing again, but with an extra page
2108              * so that we can adjust for alignment.
2109              */
2110             unsigned long real_size = host_full_size + qemu_host_page_size;
2111             real_start = (unsigned long)
2112                 mmap(NULL, real_size, PROT_NONE, flags, -1, 0);
2113             if (real_start == (unsigned long)-1) {
2114                 if (host_size < host_full_size - qemu_host_page_size) {
2115                     goto naive;
2116                 }
2117                 return (unsigned long)-1;
2118             }
2119             munmap((void *)real_start, real_size);
2120             real_start = ROUND_UP(real_start, align);
2121         }
2122         current_start = real_start;
2123     }
2124  naive:
2125 #endif
2126 
2127     while (1) {
2128         unsigned long real_start, real_size, aligned_size;
2129         aligned_size = real_size = host_size;
2130 
2131         /* Do not use mmap_find_vma here because that is limited to the
2132          * guest address space.  We are going to make the
2133          * guest address space fit whatever we're given.
2134          */
2135         real_start = (unsigned long)
2136             mmap((void *)current_start, host_size, PROT_NONE, flags, -1, 0);
2137         if (real_start == (unsigned long)-1) {
2138             return (unsigned long)-1;
2139         }
2140 
2141         /* Check to see if the address is valid.  */
2142         if (host_start && real_start != current_start) {
2143             goto try_again;
2144         }
2145 
2146         /* Ensure the address is properly aligned.  */
2147         if (real_start & (align - 1)) {
2148             /* Ideally, we adjust like
2149              *
2150              *    pages: [  ][  ][  ][  ][  ]
2151              *      old:   [   real   ]
2152              *             [ aligned  ]
2153              *      new:   [     real     ]
2154              *               [ aligned  ]
2155              *
2156              * But if there is something else mapped right after it,
2157              * then obviously it won't have room to grow, and the
2158              * kernel will put the new larger real someplace else with
2159              * unknown alignment (if we made it to here, then
2160              * fixed=false).  Which is why we grow real by a full page
2161              * size, instead of by part of one; so that even if we get
2162              * moved, we can still guarantee alignment.  But this does
2163              * mean that there is a padding of < 1 page both before
2164              * and after the aligned range; the "after" padding could
2165              * cause problems for ARM emulation where it could butt into
2166              * the space where we need to put the commpage.
2167              */
2168             munmap((void *)real_start, host_size);
2169             real_size = aligned_size + qemu_host_page_size;
2170             real_start = (unsigned long)
2171                 mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0);
2172             if (real_start == (unsigned long)-1) {
2173                 return (unsigned long)-1;
2174             }
2175             aligned_start = ROUND_UP(real_start, align);
2176         } else {
2177             aligned_start = real_start;
2178         }
2179 
2180 #if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
2181         /* On 32-bit ARM, we need to also be able to map the commpage.  */
2182         int valid = init_guest_commpage(aligned_start - guest_start,
2183                                         aligned_size + guest_start);
2184         if (valid == -1) {
2185             munmap((void *)real_start, real_size);
2186             return (unsigned long)-1;
2187         } else if (valid == 0) {
2188             goto try_again;
2189         }
2190 #endif
2191 
2192         /* If nothing has said `return -1` or `goto try_again` yet,
2193          * then the address we have is good.
2194          */
2195         break;
2196 
2197     try_again:
2198         /* That address didn't work.  Unmap and try a different one.
2199          * The address the host picked is typically right at
2200          * the top of the host address space and leaves the guest with
2201          * no usable address space.  Resort to a linear search.  We
2202          * already compensated for mmap_min_addr, so this should not
2203          * happen often.  Probably means we got unlucky and host
2204          * address space randomization put a shared library somewhere
2205          * inconvenient.
2206          *
2207          * This is probably a good strategy if host_start, but is
2208          * probably a bad strategy if not, which means we got here
2209          * because of trouble with ARM commpage setup.
2210          */
2211         munmap((void *)real_start, real_size);
2212         current_start += align;
2213         if (host_start == current_start) {
2214             /* Theoretically possible if host doesn't have any suitably
2215              * aligned areas.  Normally the first mmap will fail.
2216              */
2217             return (unsigned long)-1;
2218         }
2219     }
2220 
2221     qemu_log_mask(CPU_LOG_PAGE, "Reserved 0x%lx bytes of guest address space\n", host_size);
2222 
2223     return aligned_start;
2224 }
2225 
2226 static void probe_guest_base(const char *image_name,
2227                              abi_ulong loaddr, abi_ulong hiaddr)
2228 {
2229     /* Probe for a suitable guest base address, if the user has not set
2230      * it explicitly, and set guest_base appropriately.
2231      * In case of error we will print a suitable message and exit.
2232      */
2233     const char *errmsg;
2234     if (!have_guest_base && !reserved_va) {
2235         unsigned long host_start, real_start, host_size;
2236 
2237         /* Round addresses to page boundaries.  */
2238         loaddr &= qemu_host_page_mask;
2239         hiaddr = HOST_PAGE_ALIGN(hiaddr);
2240 
2241         if (loaddr < mmap_min_addr) {
2242             host_start = HOST_PAGE_ALIGN(mmap_min_addr);
2243         } else {
2244             host_start = loaddr;
2245             if (host_start != loaddr) {
2246                 errmsg = "Address overflow loading ELF binary";
2247                 goto exit_errmsg;
2248             }
2249         }
2250         host_size = hiaddr - loaddr;
2251 
2252         /* Setup the initial guest memory space with ranges gleaned from
2253          * the ELF image that is being loaded.
2254          */
2255         real_start = init_guest_space(host_start, host_size, loaddr, false);
2256         if (real_start == (unsigned long)-1) {
2257             errmsg = "Unable to find space for application";
2258             goto exit_errmsg;
2259         }
2260         guest_base = real_start - loaddr;
2261 
2262         qemu_log_mask(CPU_LOG_PAGE, "Relocating guest address space from 0x"
2263                       TARGET_ABI_FMT_lx " to 0x%lx\n",
2264                       loaddr, real_start);
2265     }
2266     return;
2267 
2268 exit_errmsg:
2269     fprintf(stderr, "%s: %s\n", image_name, errmsg);
2270     exit(-1);
2271 }
2272 
2273 
2274 /* Load an ELF image into the address space.
2275 
2276    IMAGE_NAME is the filename of the image, to use in error messages.
2277    IMAGE_FD is the open file descriptor for the image.
2278 
2279    BPRM_BUF is a copy of the beginning of the file; this of course
2280    contains the elf file header at offset 0.  It is assumed that this
2281    buffer is sufficiently aligned to present no problems to the host
2282    in accessing data at aligned offsets within the buffer.
2283 
2284    On return: INFO values will be filled in, as necessary or available.  */
2285 
2286 static void load_elf_image(const char *image_name, int image_fd,
2287                            struct image_info *info, char **pinterp_name,
2288                            char bprm_buf[BPRM_BUF_SIZE])
2289 {
2290     struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
2291     struct elf_phdr *phdr;
2292     abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
2293     int i, retval;
2294     const char *errmsg;
2295 
2296     /* First of all, some simple consistency checks */
2297     errmsg = "Invalid ELF image for this architecture";
2298     if (!elf_check_ident(ehdr)) {
2299         goto exit_errmsg;
2300     }
2301     bswap_ehdr(ehdr);
2302     if (!elf_check_ehdr(ehdr)) {
2303         goto exit_errmsg;
2304     }
2305 
2306     i = ehdr->e_phnum * sizeof(struct elf_phdr);
2307     if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
2308         phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
2309     } else {
2310         phdr = (struct elf_phdr *) alloca(i);
2311         retval = pread(image_fd, phdr, i, ehdr->e_phoff);
2312         if (retval != i) {
2313             goto exit_read;
2314         }
2315     }
2316     bswap_phdr(phdr, ehdr->e_phnum);
2317 
2318     info->nsegs = 0;
2319     info->pt_dynamic_addr = 0;
2320 
2321     mmap_lock();
2322 
2323     /* Find the maximum size of the image and allocate an appropriate
2324        amount of memory to handle that.  */
2325     loaddr = -1, hiaddr = 0;
2326     info->alignment = 0;
2327     for (i = 0; i < ehdr->e_phnum; ++i) {
2328         if (phdr[i].p_type == PT_LOAD) {
2329             abi_ulong a = phdr[i].p_vaddr - phdr[i].p_offset;
2330             if (a < loaddr) {
2331                 loaddr = a;
2332             }
2333             a = phdr[i].p_vaddr + phdr[i].p_memsz;
2334             if (a > hiaddr) {
2335                 hiaddr = a;
2336             }
2337             ++info->nsegs;
2338             info->alignment |= phdr[i].p_align;
2339         }
2340     }
2341 
2342     load_addr = loaddr;
2343     if (ehdr->e_type == ET_DYN) {
2344         /* The image indicates that it can be loaded anywhere.  Find a
2345            location that can hold the memory space required.  If the
2346            image is pre-linked, LOADDR will be non-zero.  Since we do
2347            not supply MAP_FIXED here we'll use that address if and
2348            only if it remains available.  */
2349         load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
2350                                 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
2351                                 -1, 0);
2352         if (load_addr == -1) {
2353             goto exit_perror;
2354         }
2355     } else if (pinterp_name != NULL) {
2356         /* This is the main executable.  Make sure that the low
2357            address does not conflict with MMAP_MIN_ADDR or the
2358            QEMU application itself.  */
2359         probe_guest_base(image_name, loaddr, hiaddr);
2360     }
2361     load_bias = load_addr - loaddr;
2362 
2363     if (elf_is_fdpic(ehdr)) {
2364         struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
2365             g_malloc(sizeof(*loadsegs) * info->nsegs);
2366 
2367         for (i = 0; i < ehdr->e_phnum; ++i) {
2368             switch (phdr[i].p_type) {
2369             case PT_DYNAMIC:
2370                 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
2371                 break;
2372             case PT_LOAD:
2373                 loadsegs->addr = phdr[i].p_vaddr + load_bias;
2374                 loadsegs->p_vaddr = phdr[i].p_vaddr;
2375                 loadsegs->p_memsz = phdr[i].p_memsz;
2376                 ++loadsegs;
2377                 break;
2378             }
2379         }
2380     }
2381 
2382     info->load_bias = load_bias;
2383     info->load_addr = load_addr;
2384     info->entry = ehdr->e_entry + load_bias;
2385     info->start_code = -1;
2386     info->end_code = 0;
2387     info->start_data = -1;
2388     info->end_data = 0;
2389     info->brk = 0;
2390     info->elf_flags = ehdr->e_flags;
2391 
2392     for (i = 0; i < ehdr->e_phnum; i++) {
2393         struct elf_phdr *eppnt = phdr + i;
2394         if (eppnt->p_type == PT_LOAD) {
2395             abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len;
2396             int elf_prot = 0;
2397 
2398             if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
2399             if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
2400             if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
2401 
2402             vaddr = load_bias + eppnt->p_vaddr;
2403             vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
2404             vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
2405             vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
2406 
2407             /*
2408              * Some segments may be completely empty without any backing file
2409              * segment; in that case, just let zero_bss allocate an empty buffer
2410              * for it.
2411              */
2412             if (eppnt->p_filesz != 0) {
2413                 error = target_mmap(vaddr_ps, vaddr_len, elf_prot,
2414                                     MAP_PRIVATE | MAP_FIXED,
2415                                     image_fd, eppnt->p_offset - vaddr_po);
2416 
2417                 if (error == -1) {
2418                     goto exit_perror;
2419                 }
2420             }
2421 
2422             vaddr_ef = vaddr + eppnt->p_filesz;
2423             vaddr_em = vaddr + eppnt->p_memsz;
2424 
2425             /* If the load segment requests extra zeros (e.g. bss), map it.  */
2426             if (vaddr_ef < vaddr_em) {
2427                 zero_bss(vaddr_ef, vaddr_em, elf_prot);
2428             }
2429 
2430             /* Find the full program boundaries.  */
2431             if (elf_prot & PROT_EXEC) {
2432                 if (vaddr < info->start_code) {
2433                     info->start_code = vaddr;
2434                 }
2435                 if (vaddr_ef > info->end_code) {
2436                     info->end_code = vaddr_ef;
2437                 }
2438             }
2439             if (elf_prot & PROT_WRITE) {
2440                 if (vaddr < info->start_data) {
2441                     info->start_data = vaddr;
2442                 }
2443                 if (vaddr_ef > info->end_data) {
2444                     info->end_data = vaddr_ef;
2445                 }
2446                 if (vaddr_em > info->brk) {
2447                     info->brk = vaddr_em;
2448                 }
2449             }
2450         } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
2451             char *interp_name;
2452 
2453             if (*pinterp_name) {
2454                 errmsg = "Multiple PT_INTERP entries";
2455                 goto exit_errmsg;
2456             }
2457             interp_name = malloc(eppnt->p_filesz);
2458             if (!interp_name) {
2459                 goto exit_perror;
2460             }
2461 
2462             if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
2463                 memcpy(interp_name, bprm_buf + eppnt->p_offset,
2464                        eppnt->p_filesz);
2465             } else {
2466                 retval = pread(image_fd, interp_name, eppnt->p_filesz,
2467                                eppnt->p_offset);
2468                 if (retval != eppnt->p_filesz) {
2469                     goto exit_perror;
2470                 }
2471             }
2472             if (interp_name[eppnt->p_filesz - 1] != 0) {
2473                 errmsg = "Invalid PT_INTERP entry";
2474                 goto exit_errmsg;
2475             }
2476             *pinterp_name = interp_name;
2477 #ifdef TARGET_MIPS
2478         } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) {
2479             Mips_elf_abiflags_v0 abiflags;
2480             if (eppnt->p_filesz < sizeof(Mips_elf_abiflags_v0)) {
2481                 errmsg = "Invalid PT_MIPS_ABIFLAGS entry";
2482                 goto exit_errmsg;
2483             }
2484             if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
2485                 memcpy(&abiflags, bprm_buf + eppnt->p_offset,
2486                        sizeof(Mips_elf_abiflags_v0));
2487             } else {
2488                 retval = pread(image_fd, &abiflags, sizeof(Mips_elf_abiflags_v0),
2489                                eppnt->p_offset);
2490                 if (retval != sizeof(Mips_elf_abiflags_v0)) {
2491                     goto exit_perror;
2492                 }
2493             }
2494             bswap_mips_abiflags(&abiflags);
2495             info->fp_abi = abiflags.fp_abi;
2496 #endif
2497         }
2498     }
2499 
2500     if (info->end_data == 0) {
2501         info->start_data = info->end_code;
2502         info->end_data = info->end_code;
2503         info->brk = info->end_code;
2504     }
2505 
2506     if (qemu_log_enabled()) {
2507         load_symbols(ehdr, image_fd, load_bias);
2508     }
2509 
2510     mmap_unlock();
2511 
2512     close(image_fd);
2513     return;
2514 
2515  exit_read:
2516     if (retval >= 0) {
2517         errmsg = "Incomplete read of file header";
2518         goto exit_errmsg;
2519     }
2520  exit_perror:
2521     errmsg = strerror(errno);
2522  exit_errmsg:
2523     fprintf(stderr, "%s: %s\n", image_name, errmsg);
2524     exit(-1);
2525 }
2526 
2527 static void load_elf_interp(const char *filename, struct image_info *info,
2528                             char bprm_buf[BPRM_BUF_SIZE])
2529 {
2530     int fd, retval;
2531 
2532     fd = open(path(filename), O_RDONLY);
2533     if (fd < 0) {
2534         goto exit_perror;
2535     }
2536 
2537     retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
2538     if (retval < 0) {
2539         goto exit_perror;
2540     }
2541     if (retval < BPRM_BUF_SIZE) {
2542         memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
2543     }
2544 
2545     load_elf_image(filename, fd, info, NULL, bprm_buf);
2546     return;
2547 
2548  exit_perror:
2549     fprintf(stderr, "%s: %s\n", filename, strerror(errno));
2550     exit(-1);
2551 }
2552 
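/*
 * bsearch() comparator used by lookup_symbolxx(): an address "matches"
 * a symbol when it falls anywhere inside [st_value, st_value + st_size),
 * not only when it equals st_value exactly.
 */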
2553 static int symfind(const void *s0, const void *s1)
2554 {
2555     target_ulong addr = *(target_ulong *)s0;
2556     struct elf_sym *sym = (struct elf_sym *)s1;
2557     int result = 0;
2558     if (addr < sym->st_value) {
2559         result = -1;
2560     } else if (addr >= sym->st_value + sym->st_size) {
2561         result = 1;
2562     }
2563     return result;
2564 }
2565 
2566 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
2567 {
2568 #if ELF_CLASS == ELFCLASS32
2569     struct elf_sym *syms = s->disas_symtab.elf32;
2570 #else
2571     struct elf_sym *syms = s->disas_symtab.elf64;
2572 #endif
2573 
2574     /* Binary search over the symbol table, sorted by st_value.  */
2575     struct elf_sym *sym;
2576 
2577     sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
2578     if (sym != NULL) {
2579         return s->disas_strtab + sym->st_name;
2580     }
2581 
2582     return "";
2583 }
2584 
2585 /* FIXME: This should use elf_ops.h  */
2586 static int symcmp(const void *s0, const void *s1)
2587 {
2588     struct elf_sym *sym0 = (struct elf_sym *)s0;
2589     struct elf_sym *sym1 = (struct elf_sym *)s1;
2590     return (sym0->st_value < sym1->st_value)
2591         ? -1
2592         : ((sym0->st_value > sym1->st_value) ? 1 : 0);
2593 }
2594 
2595 /* Best attempt to load symbols from this ELF object. */
2596 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
2597 {
2598     int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
2599     uint64_t segsz;
2600     struct elf_shdr *shdr;
2601     char *strings = NULL;
2602     struct syminfo *s = NULL;
2603     struct elf_sym *new_syms, *syms = NULL;
2604 
2605     shnum = hdr->e_shnum;
2606     i = shnum * sizeof(struct elf_shdr);
2607     shdr = (struct elf_shdr *)alloca(i);
2608     if (pread(fd, shdr, i, hdr->e_shoff) != i) {
2609         return;
2610     }
2611 
2612     bswap_shdr(shdr, shnum);
2613     for (i = 0; i < shnum; ++i) {
2614         if (shdr[i].sh_type == SHT_SYMTAB) {
2615             sym_idx = i;
2616             str_idx = shdr[i].sh_link;
2617             goto found;
2618         }
2619     }
2620 
2621     /* There will be no symbol table if the file was stripped.  */
2622     return;
2623 
2624  found:
2625     /* Now we know where the strtab and symtab are.  Snarf them.  */
2626     s = g_try_new(struct syminfo, 1);
2627     if (!s) {
2628         goto give_up;
2629     }
2630 
2631     segsz = shdr[str_idx].sh_size;
2632     s->disas_strtab = strings = g_try_malloc(segsz);
2633     if (!strings ||
2634         pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) {
2635         goto give_up;
2636     }
2637 
2638     segsz = shdr[sym_idx].sh_size;
2639     syms = g_try_malloc(segsz);
2640     if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) {
2641         goto give_up;
2642     }
2643 
2644     if (segsz / sizeof(struct elf_sym) > INT_MAX) {
2645         /* Implausibly large symbol table: give up rather than ploughing
2646          * on and letting the symbol count calculation overflow.
2647          */
2648         goto give_up;
2649     }
2650     nsyms = segsz / sizeof(struct elf_sym);
2651     for (i = 0; i < nsyms; ) {
2652         bswap_sym(syms + i);
2653         /* Throw away entries which we do not need.  */
2654         if (syms[i].st_shndx == SHN_UNDEF
2655             || syms[i].st_shndx >= SHN_LORESERVE
2656             || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
2657             if (i < --nsyms) {
2658                 syms[i] = syms[nsyms];
2659             }
2660         } else {
2661 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
2662             /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
2663             syms[i].st_value &= ~(target_ulong)1;
2664 #endif
2665             syms[i].st_value += load_bias;
2666             i++;
2667         }
2668     }
2669 
2670     /* No "useful" symbol.  */
2671     if (nsyms == 0) {
2672         goto give_up;
2673     }
2674 
2675     /* Attempt to free the storage associated with the local symbols
2676        that we threw away.  Whether or not this has any effect on the
2677        memory allocation depends on the malloc implementation and how
2678        many symbols we managed to discard.  */
2679     new_syms = g_try_renew(struct elf_sym, syms, nsyms);
2680     if (new_syms == NULL) {
2681         goto give_up;
2682     }
2683     syms = new_syms;
2684 
2685     qsort(syms, nsyms, sizeof(*syms), symcmp);
2686 
2687     s->disas_num_syms = nsyms;
2688 #if ELF_CLASS == ELFCLASS32
2689     s->disas_symtab.elf32 = syms;
2690 #else
2691     s->disas_symtab.elf64 = syms;
2692 #endif
2693     s->lookup_symbol = lookup_symbolxx;
2694     s->next = syminfos;
2695     syminfos = s;
2696 
2697     return;
2698 
2699 give_up:
2700     g_free(s);
2701     g_free(strings);
2702     g_free(syms);
2703 }
2704 
2705 uint32_t get_elf_eflags(int fd)
2706 {
2707     struct elfhdr ehdr;
2708     off_t offset;
2709     int ret;
2710 
2711     /* Read ELF header */
2712     offset = lseek(fd, 0, SEEK_SET);
2713     if (offset == (off_t) -1) {
2714         return 0;
2715     }
2716     ret = read(fd, &ehdr, sizeof(ehdr));
2717     if (ret != sizeof(ehdr)) {
2718         return 0;
2719     }
2720     offset = lseek(fd, offset, SEEK_SET);
2721     if (offset == (off_t) -1) {
2722         return 0;
2723     }
2724 
2725     /* Check ELF signature */
2726     if (!elf_check_ident(&ehdr)) {
2727         return 0;
2728     }
2729 
2730     /* check header */
2731     bswap_ehdr(&ehdr);
2732     if (!elf_check_ehdr(&ehdr)) {
2733         return 0;
2734     }
2735 
2736     /* return the processor-specific flags */
2737     return ehdr.e_flags;
2738 }
2739 
2740 int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
2741 {
2742     struct image_info interp_info;
2743     struct elfhdr elf_ex;
2744     char *elf_interpreter = NULL;
2745     char *scratch;
2746 
2747     memset(&interp_info, 0, sizeof(interp_info));
2748 #ifdef TARGET_MIPS
2749     interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
2750 #endif
2751 
2752     info->start_mmap = (abi_ulong)ELF_START_MMAP;
2753 
2754     load_elf_image(bprm->filename, bprm->fd, info,
2755                    &elf_interpreter, bprm->buf);
2756 
2757     /* ??? We need a copy of the elf header for passing to create_elf_tables.
2758        If we do nothing, we'll have overwritten this when we re-use bprm->buf
2759        when we load the interpreter.  */
2760     elf_ex = *(struct elfhdr *)bprm->buf;
2761 
2762     /* Do this so that we can load the interpreter, if need be.  We will
2763        change some of these later */
2764     bprm->p = setup_arg_pages(bprm, info);
2765 
2766     scratch = g_new0(char, TARGET_PAGE_SIZE);
2767     if (STACK_GROWS_DOWN) {
2768         bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
2769                                    bprm->p, info->stack_limit);
2770         info->file_string = bprm->p;
2771         bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
2772                                    bprm->p, info->stack_limit);
2773         info->env_strings = bprm->p;
2774         bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
2775                                    bprm->p, info->stack_limit);
2776         info->arg_strings = bprm->p;
2777     } else {
2778         info->arg_strings = bprm->p;
2779         bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
2780                                    bprm->p, info->stack_limit);
2781         info->env_strings = bprm->p;
2782         bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
2783                                    bprm->p, info->stack_limit);
2784         info->file_string = bprm->p;
2785         bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
2786                                    bprm->p, info->stack_limit);
2787     }
2788 
2789     g_free(scratch);
2790 
2791     if (!bprm->p) {
2792         fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
2793         exit(-1);
2794     }
2795 
2796     if (elf_interpreter) {
2797         load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
2798 
2799         /* If the program interpreter is one of these two, then assume
2800            an iBCS2 image.  Otherwise assume a native linux image.  */
2801 
2802         if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
2803             || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
2804             info->personality = PER_SVR4;
2805 
2806             /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
2807                and some applications "depend" upon this behavior.  Since
2808                we do not have the power to recompile these, we emulate
2809                the SVr4 behavior.  Sigh.  */
2810             target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
2811                         MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2812         }
2813 #ifdef TARGET_MIPS
2814         info->interp_fp_abi = interp_info.fp_abi;
2815 #endif
2816     }
2817 
2818     bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
2819                                 info, (elf_interpreter ? &interp_info : NULL));
2820     info->start_stack = bprm->p;
2821 
2822     /* If we have an interpreter, set that as the program's entry point.
2823        Copy the load_bias as well, to help PPC64 interpret the entry
2824        point as a function descriptor.  Do this after creating elf tables
2825        so that we copy the original program entry point into the AUXV.  */
2826     if (elf_interpreter) {
2827         info->load_bias = interp_info.load_bias;
2828         info->entry = interp_info.entry;
2829         free(elf_interpreter);
2830     }
2831 
2832 #ifdef USE_ELF_CORE_DUMP
2833     bprm->core_dump = &elf_core_dump;
2834 #endif
2835 
2836     return 0;
2837 }
2838 
2839 #ifdef USE_ELF_CORE_DUMP
2840 /*
2841  * Definitions to generate Intel SVR4-like core files.
2842  * These mostly have the same names as the SVR4 types with "target_elf_"
2843  * tacked on the front to prevent clashes with linux definitions,
2844  * and the typedef forms have been avoided.  This is mostly like
2845  * the SVR4 structure, but more Linuxy, with things that Linux does
2846  * not support and which gdb doesn't really use excluded.
2847  *
2848  * Fields we don't dump (their contents are zero) in linux-user qemu
2849  * are marked with XXX.
2850  *
2851  * Core dump code is copied from the Linux kernel (fs/binfmt_elf.c).
2852  *
2853  * Porting ELF coredump to a target is a (quite) simple process.  First you
2854  * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for
2855  * the target resides):
2856  *
2857  * #define USE_ELF_CORE_DUMP
2858  *
2859  * Next you define the type of the register set used for dumping.  The ELF
2860  * specification says it must be an array of elf_greg_t with ELF_NREG elements.
2861  *
2862  * typedef <target_regtype> target_elf_greg_t;
2863  * #define ELF_NREG <number of registers>
2864  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
2865  *
2866  * The last step is to implement a target-specific function that copies
2867  * registers from the given CPU into the register set just defined:
2868  *
2869  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
2870  *                                const CPUArchState *env);
2871  *
2872  * Parameters:
2873  *     regs - copy register values into here (allocated and zeroed by caller)
2874  *     env - copy registers from here
2875  *
2876  * An example for the ARM target is provided in this file.
2877  */
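
/*
 * As a further sketch, a minimal elf_core_copy_regs() for a
 * hypothetical target that keeps its general registers in an
 * env->regs[] array (illustrative names only, not any real target's
 * fields):
 *
 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                const CPUArchState *env)
 * {
 *     int i;
 *
 *     for (i = 0; i < ELF_NREG; i++) {
 *         (*regs)[i] = tswapreg(env->regs[i]);
 *     }
 * }
 */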
2878 
2879 /* An ELF note in memory */
2880 struct memelfnote {
2881     const char *name;
2882     size_t     namesz;
2883     size_t     namesz_rounded;
2884     int        type;
2885     size_t     datasz;
2886     size_t     datasz_rounded;
2887     void       *data;
2888     size_t     notesz;
2889 };
2890 
2891 struct target_elf_siginfo {
2892     abi_int    si_signo; /* signal number */
2893     abi_int    si_code;  /* extra code */
2894     abi_int    si_errno; /* errno */
2895 };
2896 
2897 struct target_elf_prstatus {
2898     struct target_elf_siginfo pr_info;      /* Info associated with signal */
2899     abi_short          pr_cursig;    /* Current signal */
2900     abi_ulong          pr_sigpend;   /* XXX */
2901     abi_ulong          pr_sighold;   /* XXX */
2902     target_pid_t       pr_pid;
2903     target_pid_t       pr_ppid;
2904     target_pid_t       pr_pgrp;
2905     target_pid_t       pr_sid;
2906     struct target_timeval pr_utime;  /* XXX User time */
2907     struct target_timeval pr_stime;  /* XXX System time */
2908     struct target_timeval pr_cutime; /* XXX Cumulative user time */
2909     struct target_timeval pr_cstime; /* XXX Cumulative system time */
2910     target_elf_gregset_t      pr_reg;       /* GP registers */
2911     abi_int            pr_fpvalid;   /* XXX */
2912 };
2913 
2914 #define ELF_PRARGSZ     (80) /* Number of chars for args */
2915 
2916 struct target_elf_prpsinfo {
2917     char         pr_state;       /* numeric process state */
2918     char         pr_sname;       /* char for pr_state */
2919     char         pr_zomb;        /* zombie */
2920     char         pr_nice;        /* nice val */
2921     abi_ulong    pr_flag;        /* flags */
2922     target_uid_t pr_uid;
2923     target_gid_t pr_gid;
2924     target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
2925     /* Lots missing */
2926     char    pr_fname[16] QEMU_NONSTRING; /* filename of executable */
2927     char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
2928 };
2929 
2930 /* Here is the structure in which the status of each thread is captured. */
2931 struct elf_thread_status {
2932     QTAILQ_ENTRY(elf_thread_status)  ets_link;
2933     struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
2934 #if 0
2935     elf_fpregset_t fpu;             /* NT_PRFPREG */
2936     struct task_struct *thread;
2937     elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
2938 #endif
2939     struct memelfnote notes[1];
2940     int num_notes;
2941 };
2942 
2943 struct elf_note_info {
2944     struct memelfnote   *notes;
2945     struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
2946     struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
2947 
2948     QTAILQ_HEAD(, elf_thread_status) thread_list;
2949 #if 0
2950     /*
2951      * Current version of ELF coredump doesn't support
2952      * dumping fp regs etc.
2953      */
2954     elf_fpregset_t *fpu;
2955     elf_fpxregset_t *xfpu;
2956     int thread_status_size;
2957 #endif
2958     int notes_size;
2959     int numnote;
2960 };
2961 
2962 struct vm_area_struct {
2963     target_ulong   vma_start;  /* start vaddr of memory region */
2964     target_ulong   vma_end;    /* end vaddr of memory region */
2965     abi_ulong      vma_flags;  /* protection etc. flags for the region */
2966     QTAILQ_ENTRY(vm_area_struct) vma_link;
2967 };
2968 
2969 struct mm_struct {
2970     QTAILQ_HEAD(, vm_area_struct) mm_mmap;
2971     int mm_count;           /* number of mappings */
2972 };
2973 
2974 static struct mm_struct *vma_init(void);
2975 static void vma_delete(struct mm_struct *);
2976 static int vma_add_mapping(struct mm_struct *, target_ulong,
2977                            target_ulong, abi_ulong);
2978 static int vma_get_mapping_count(const struct mm_struct *);
2979 static struct vm_area_struct *vma_first(const struct mm_struct *);
2980 static struct vm_area_struct *vma_next(struct vm_area_struct *);
2981 static abi_ulong vma_dump_size(const struct vm_area_struct *);
2982 static int vma_walker(void *priv, target_ulong start, target_ulong end,
2983                       unsigned long flags);
2984 
2985 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
2986 static void fill_note(struct memelfnote *, const char *, int,
2987                       unsigned int, void *);
2988 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
2989 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
2990 static void fill_auxv_note(struct memelfnote *, const TaskState *);
2991 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
2992 static size_t note_size(const struct memelfnote *);
2993 static void free_note_info(struct elf_note_info *);
2994 static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
2995 static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
2996 static int core_dump_filename(const TaskState *, char *, size_t);
2997 
2998 static int dump_write(int, const void *, size_t);
2999 static int write_note(struct memelfnote *, int);
3000 static int write_note_info(struct elf_note_info *, int);
3001 
3002 #ifdef BSWAP_NEEDED
3003 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
3004 {
3005     prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
3006     prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
3007     prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
3008     prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
3009     prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend);
3010     prstatus->pr_sighold = tswapal(prstatus->pr_sighold);
3011     prstatus->pr_pid = tswap32(prstatus->pr_pid);
3012     prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
3013     prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
3014     prstatus->pr_sid = tswap32(prstatus->pr_sid);
3015     /* cpu times are not filled, so we skip them */
3016     /* regs should be in correct format already */
3017     prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
3018 }
3019 
3020 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
3021 {
3022     psinfo->pr_flag = tswapal(psinfo->pr_flag);
3023     psinfo->pr_uid = tswap16(psinfo->pr_uid);
3024     psinfo->pr_gid = tswap16(psinfo->pr_gid);
3025     psinfo->pr_pid = tswap32(psinfo->pr_pid);
3026     psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
3027     psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
3028     psinfo->pr_sid = tswap32(psinfo->pr_sid);
3029 }
3030 
3031 static void bswap_note(struct elf_note *en)
3032 {
3033     bswap32s(&en->n_namesz);
3034     bswap32s(&en->n_descsz);
3035     bswap32s(&en->n_type);
3036 }
3037 #else
3038 static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
3039 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) { }
3040 static inline void bswap_note(struct elf_note *en) { }
3041 #endif /* BSWAP_NEEDED */
3042 
3043 /*
3044  * Minimal support for linux memory regions.  These are needed
3045  * when finding out exactly what memory belongs to the
3046  * emulated process.  No locks are needed here, as long as the
3047  * thread that received the signal is stopped.
3048  */
3049 
3050 static struct mm_struct *vma_init(void)
3051 {
3052     struct mm_struct *mm;
3053 
3054     if ((mm = g_malloc(sizeof (*mm))) == NULL)
3055         return (NULL);
3056 
3057     mm->mm_count = 0;
3058     QTAILQ_INIT(&mm->mm_mmap);
3059 
3060     return (mm);
3061 }
3062 
3063 static void vma_delete(struct mm_struct *mm)
3064 {
3065     struct vm_area_struct *vma;
3066 
3067     while ((vma = vma_first(mm)) != NULL) {
3068         QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
3069         g_free(vma);
3070     }
3071     g_free(mm);
3072 }
3073 
3074 static int vma_add_mapping(struct mm_struct *mm, target_ulong start,
3075                            target_ulong end, abi_ulong flags)
3076 {
3077     struct vm_area_struct *vma;
3078 
3079     if ((vma = g_malloc0(sizeof (*vma))) == NULL)
3080         return (-1);
3081 
3082     vma->vma_start = start;
3083     vma->vma_end = end;
3084     vma->vma_flags = flags;
3085 
3086     QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
3087     mm->mm_count++;
3088 
3089     return (0);
3090 }
3091 
3092 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
3093 {
3094     return (QTAILQ_FIRST(&mm->mm_mmap));
3095 }
3096 
3097 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
3098 {
3099     return (QTAILQ_NEXT(vma, vma_link));
3100 }
3101 
3102 static int vma_get_mapping_count(const struct mm_struct *mm)
3103 {
3104     return (mm->mm_count);
3105 }
3106 
3107 /*
3108  * Calculate the file (dump) size of the given memory region.
3109  */
3110 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
3111 {
3112     /* if we cannot even read the first page, skip it */
3113     if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
3114         return (0);
3115 
3116     /*
3117      * Usually we don't dump executable pages as they contain
3118      * non-writable code that a debugger can read directly from
3119      * the target library etc.  However, thread stacks are also
3120      * marked executable, so we read in the first page of a given
3121      * region and check whether it contains an ELF header.  If
3122      * there is no ELF header, we dump the region.
3123      */
3124     if (vma->vma_flags & PROT_EXEC) {
3125         char page[TARGET_PAGE_SIZE];
3126 
3127         copy_from_user(page, vma->vma_start, sizeof (page));
3128         if ((page[EI_MAG0] == ELFMAG0) &&
3129             (page[EI_MAG1] == ELFMAG1) &&
3130             (page[EI_MAG2] == ELFMAG2) &&
3131             (page[EI_MAG3] == ELFMAG3)) {
3132             /*
3133              * Mappings are possibly from an ELF binary.  Don't dump
3134              * them.
3135              */
3136             return (0);
3137         }
3138     }
3139 
3140     return (vma->vma_end - vma->vma_start);
3141 }
3142 
3143 static int vma_walker(void *priv, target_ulong start, target_ulong end,
3144                       unsigned long flags)
3145 {
3146     struct mm_struct *mm = (struct mm_struct *)priv;
3147 
3148     vma_add_mapping(mm, start, end, flags);
3149     return (0);
3150 }
3151 
3152 static void fill_note(struct memelfnote *note, const char *name, int type,
3153                       unsigned int sz, void *data)
3154 {
3155     unsigned int namesz;
3156 
3157     namesz = strlen(name) + 1;
3158     note->name = name;
3159     note->namesz = namesz;
3160     note->namesz_rounded = roundup(namesz, sizeof (int32_t));
3161     note->type = type;
3162     note->datasz = sz;
3163     note->datasz_rounded = roundup(sz, sizeof (int32_t));
3164 
3165     note->data = data;
3166 
3167     /*
3168      * We calculate the rounded-up note size here, as specified
3169      * by the ELF specification.
3170      */
3171     note->notesz = sizeof (struct elf_note) +
3172         note->namesz_rounded + note->datasz_rounded;
3173 }
3174 
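/*
 * Worked example of the rounding above, as a sketch (buf and its size
 * are hypothetical; the note header is three 32-bit words, 12 bytes):
 *
 *     struct memelfnote n;
 *     char buf[13];
 *
 *     fill_note(&n, "CORE", NT_AUXV, sizeof(buf), buf);
 *     // namesz = 5 ("CORE" plus NUL), namesz_rounded = 8
 *     // datasz = 13, datasz_rounded = 16
 *     // notesz = 12 + 8 + 16 = 36 bytes on disk
 */
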
3175 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
3176                             uint32_t flags)
3177 {
3178     (void) memset(elf, 0, sizeof(*elf));
3179 
3180     (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
3181     elf->e_ident[EI_CLASS] = ELF_CLASS;
3182     elf->e_ident[EI_DATA] = ELF_DATA;
3183     elf->e_ident[EI_VERSION] = EV_CURRENT;
3184     elf->e_ident[EI_OSABI] = ELF_OSABI;
3185 
3186     elf->e_type = ET_CORE;
3187     elf->e_machine = machine;
3188     elf->e_version = EV_CURRENT;
3189     elf->e_phoff = sizeof(struct elfhdr);
3190     elf->e_flags = flags;
3191     elf->e_ehsize = sizeof(struct elfhdr);
3192     elf->e_phentsize = sizeof(struct elf_phdr);
3193     elf->e_phnum = segs;
3194 
3195     bswap_ehdr(elf);
3196 }
3197 
3198 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
3199 {
3200     phdr->p_type = PT_NOTE;
3201     phdr->p_offset = offset;
3202     phdr->p_vaddr = 0;
3203     phdr->p_paddr = 0;
3204     phdr->p_filesz = sz;
3205     phdr->p_memsz = 0;
3206     phdr->p_flags = 0;
3207     phdr->p_align = 0;
3208 
3209     bswap_phdr(phdr, 1);
3210 }
3211 
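/*
 * The PT_NOTE segment gets no virtual address and no memory size
 * (p_vaddr = 0, p_memsz = 0): the notes live only in the core file,
 * not in the guest address space.
 */
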
3212 static size_t note_size(const struct memelfnote *note)
3213 {
3214     return (note->notesz);
3215 }
3216 
3217 static void fill_prstatus(struct target_elf_prstatus *prstatus,
3218                           const TaskState *ts, int signr)
3219 {
3220     (void) memset(prstatus, 0, sizeof (*prstatus));
3221     prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
3222     prstatus->pr_pid = ts->ts_tid;
3223     prstatus->pr_ppid = getppid();
3224     prstatus->pr_pgrp = getpgrp();
3225     prstatus->pr_sid = getsid(0);
3226 
3227     bswap_prstatus(prstatus);
3228 }
3229 
3230 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
3231 {
3232     char *base_filename;
3233     unsigned int i, len;
3234 
3235     (void) memset(psinfo, 0, sizeof (*psinfo));
3236 
3237     len = ts->info->arg_end - ts->info->arg_start;
3238     if (len >= ELF_PRARGSZ)
3239         len = ELF_PRARGSZ - 1;
3240     if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
3241         return -EFAULT;
3242     for (i = 0; i < len; i++)
3243         if (psinfo->pr_psargs[i] == 0)
3244             psinfo->pr_psargs[i] = ' ';
3245     psinfo->pr_psargs[len] = 0;
3246 
3247     psinfo->pr_pid = getpid();
3248     psinfo->pr_ppid = getppid();
3249     psinfo->pr_pgrp = getpgrp();
3250     psinfo->pr_sid = getsid(0);
3251     psinfo->pr_uid = getuid();
3252     psinfo->pr_gid = getgid();
3253 
3254     base_filename = g_path_get_basename(ts->bprm->filename);
3255     /*
3256      * Using strncpy here is fine: at max-length,
3257      * this field is not NUL-terminated.
3258      */
3259     (void) strncpy(psinfo->pr_fname, base_filename,
3260                    sizeof(psinfo->pr_fname));
3261 
3262     g_free(base_filename);
3263     bswap_psinfo(psinfo);
3264     return (0);
3265 }
3266 
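/*
 * Illustration of the argument flattening above: if the guest was
 * started as "ls -l /tmp", the stack holds "ls\0-l\0/tmp\0" and
 * pr_psargs becomes the single string "ls -l /tmp" (NUL separators
 * replaced by spaces, truncated to ELF_PRARGSZ - 1 bytes if longer).
 */
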
3267 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
3268 {
3269     elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
3270     elf_addr_t orig_auxv = auxv;
3271     void *ptr;
3272     int len = ts->info->auxv_len;
3273 
3274     /*
3275      * The auxiliary vector is stored on the target process stack.  It
3276      * contains {type, value} pairs that we need to dump into the note.
3277      * This is not strictly necessary, but we do it for completeness.
3278      */
3279 
3280     /* read in the whole auxv vector and copy it to the memelfnote */
3281     ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
3282     if (ptr != NULL) {
3283         fill_note(note, "CORE", NT_AUXV, len, ptr);
3284         unlock_user(ptr, auxv, len);
3285     }
3286 }
3287 
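/*
 * The resulting NT_AUXV payload is the raw auxv block, i.e. a list of
 * { type, value } pairs such as (values illustrative):
 *
 *     { AT_PHDR,  0x400040 }
 *     { AT_ENTRY, 0x401000 }
 *     ...
 *     { AT_NULL,  0 }          <- terminator
 */
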
3288 /*
3289  * Constructs the name of the coredump file.  We use the following
3290  * naming convention:
3291  *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
3292  *
3293  * Returns 0 on success, -1 otherwise (errno is set).
3294  */
3295 static int core_dump_filename(const TaskState *ts, char *buf,
3296                               size_t bufsize)
3297 {
3298     char timestamp[64];
3299     char *base_filename = NULL;
3300     struct timeval tv;
3301     struct tm tm;
3302 
3303     assert(bufsize >= PATH_MAX);
3304 
3305     if (gettimeofday(&tv, NULL) < 0) {
3306         (void) fprintf(stderr, "unable to get current timestamp: %s\n",
3307                        strerror(errno));
3308         return (-1);
3309     }
3310 
3311     base_filename = g_path_get_basename(ts->bprm->filename);
3312     (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
3313                     localtime_r(&tv.tv_sec, &tm));
3314     (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
3315                     base_filename, timestamp, (int)getpid());
3316     g_free(base_filename);
3317 
3318     return (0);
3319 }
3320 
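/*
 * For example (illustrative values), dumping a guest binary "ls" as
 * pid 12345 on 2019-06-01 at 10:30:00 yields:
 *
 *     qemu_ls_20190601-103000_12345.core
 */
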
3321 static int dump_write(int fd, const void *ptr, size_t size)
3322 {
3323     const char *bufp = (const char *)ptr;
3324     ssize_t bytes_written, bytes_left;
3325     struct rlimit dumpsize;
3326     off_t pos;
3327 
3328     bytes_written = 0;
3329     getrlimit(RLIMIT_CORE, &dumpsize);
3330     if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
3331         if (errno == ESPIPE) { /* not a seekable stream */
3332             bytes_left = size;
3333         } else {
3334             return pos;
3335         }
3336     } else {
3337         if (dumpsize.rlim_cur <= pos) {
3338             return -1;
3339         } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
3340             bytes_left = size;
3341         } else {
3342             size_t limit_left = dumpsize.rlim_cur - pos;
3343             bytes_left = limit_left >= size ? size : limit_left;
3344         }
3345     }
3346 
3347     /*
3348      * Under normal conditions a single write(2) should do, but
3349      * for sockets etc. this retry loop is more portable.
3350      */
3351     do {
3352         bytes_written = write(fd, bufp, bytes_left);
3353         if (bytes_written < 0) {
3354             if (errno == EINTR)
3355                 continue;
3356             return (-1);
3357         } else if (bytes_written == 0) { /* eof */
3358             return (-1);
3359         }
3360         bufp += bytes_written;
3361         bytes_left -= bytes_written;
3362     } while (bytes_left > 0);
3363 
3364     return (0);
3365 }
3366 
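/*
 * Example of the RLIMIT_CORE clamping above (numbers illustrative):
 * with rlim_cur = 1 MiB and the file position already at 1 MiB - 100,
 * a 4096-byte request is clamped to bytes_left = 100; once the limit
 * is reached, subsequent calls fail with -1.
 */
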
3367 static int write_note(struct memelfnote *men, int fd)
3368 {
3369     struct elf_note en;
3370 
3371     en.n_namesz = men->namesz;
3372     en.n_type = men->type;
3373     en.n_descsz = men->datasz;
3374 
3375     bswap_note(&en);
3376 
3377     if (dump_write(fd, &en, sizeof(en)) != 0)
3378         return (-1);
3379     if (dump_write(fd, men->name, men->namesz_rounded) != 0)
3380         return (-1);
3381     if (dump_write(fd, men->data, men->datasz_rounded) != 0)
3382         return (-1);
3383 
3384     return (0);
3385 }
3386 
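/*
 * On-disk layout of a single note as emitted above, e.g. for name
 * "CORE" and type NT_PRSTATUS:
 *
 *     +----------------------------+
 *     | struct elf_note (12 bytes) |  n_namesz, n_descsz, n_type
 *     +----------------------------+
 *     | "CORE\0" + 3 pad bytes     |  namesz_rounded = 8
 *     +----------------------------+
 *     | payload + pad              |  datasz_rounded bytes
 *     +----------------------------+
 */
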
3387 static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
3388 {
3389     CPUState *cpu = env_cpu((CPUArchState *)env);
3390     TaskState *ts = (TaskState *)cpu->opaque;
3391     struct elf_thread_status *ets;
3392 
3393     ets = g_malloc0(sizeof (*ets));
3394     ets->num_notes = 1; /* only prstatus is dumped */
3395     fill_prstatus(&ets->prstatus, ts, 0);
3396     elf_core_copy_regs(&ets->prstatus.pr_reg, env);
3397     fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
3398               &ets->prstatus);
3399 
3400     QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
3401 
3402     info->notes_size += note_size(&ets->notes[0]);
3403 }
3404 
3405 static void init_note_info(struct elf_note_info *info)
3406 {
3407     /* Initialize the elf_note_info structure so that it is at
3408      * least safe to call free_note_info() on it. Must be
3409      * called before calling fill_note_info().
3410      */
3411     memset(info, 0, sizeof (*info));
3412     QTAILQ_INIT(&info->thread_list);
3413 }
3414 
3415 static int fill_note_info(struct elf_note_info *info,
3416                           long signr, const CPUArchState *env)
3417 {
3418 #define NUMNOTES 3
3419     CPUState *cpu = env_cpu((CPUArchState *)env);
3420     TaskState *ts = (TaskState *)cpu->opaque;
3421     int i;
3422 
3423     info->notes = g_new0(struct memelfnote, NUMNOTES);
3424     if (info->notes == NULL)
3425         return (-ENOMEM);
3426     info->prstatus = g_malloc0(sizeof (*info->prstatus));
3427     if (info->prstatus == NULL)
3428         return (-ENOMEM);
3429     info->psinfo = g_malloc0(sizeof (*info->psinfo));
3430     if (info->psinfo == NULL)
3431         return (-ENOMEM);
3432 
3433     /*
3434      * First fill in the status (and registers) of the current
3435      * thread, including the process info and the aux vector.
3436      */
3437     fill_prstatus(info->prstatus, ts, signr);
3438     elf_core_copy_regs(&info->prstatus->pr_reg, env);
3439     fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
3440               sizeof (*info->prstatus), info->prstatus);
3441     fill_psinfo(info->psinfo, ts);
3442     fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
3443               sizeof (*info->psinfo), info->psinfo);
3444     fill_auxv_note(&info->notes[2], ts);
3445     info->numnote = NUMNOTES;
3446 
3447     info->notes_size = 0;
3448     for (i = 0; i < info->numnote; i++)
3449         info->notes_size += note_size(&info->notes[i]);
3450 
3451     /* read and fill status of all threads */
3452     cpu_list_lock();
3453     CPU_FOREACH(cpu) {
3454         if (cpu == thread_cpu) {
3455             continue;
3456         }
3457         fill_thread_info(info, (CPUArchState *)cpu->env_ptr);
3458     }
3459     cpu_list_unlock();
3460 
3461     return (0);
3462 }
3463 
3464 static void free_note_info(struct elf_note_info *info)
3465 {
3466     struct elf_thread_status *ets;
3467 
3468     while (!QTAILQ_EMPTY(&info->thread_list)) {
3469         ets = QTAILQ_FIRST(&info->thread_list);
3470         QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
3471         g_free(ets);
3472     }
3473 
3474     g_free(info->prstatus);
3475     g_free(info->psinfo);
3476     g_free(info->notes);
3477 }
3478 
3479 static int write_note_info(struct elf_note_info *info, int fd)
3480 {
3481     struct elf_thread_status *ets;
3482     int i, error = 0;
3483 
3484     /* write prstatus, psinfo and auxv for current thread */
3485     for (i = 0; i < info->numnote; i++)
3486         if ((error = write_note(&info->notes[i], fd)) != 0)
3487             return (error);
3488 
3489     /* write prstatus for each thread */
3490     QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
3491         if ((error = write_note(&ets->notes[0], fd)) != 0)
3492             return (error);
3493     }
3494 
3495     return (0);
3496 }
3497 
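/*
 * The resulting note stream is therefore, in order: NT_PRSTATUS,
 * NT_PRPSINFO and NT_AUXV for the dumping thread, followed by one
 * NT_PRSTATUS per remaining thread (see fill_note_info() above).
 */
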
3498 /*
3499  * Write out ELF coredump.
3500  *
3501  * See the documentation of the ELF object file format in:
3502  * http://www.caldera.com/developers/devspecs/gabi41.pdf
3503  *
3504  * The Linux coredump format is as follows:
3505  *
3506  * 0   +----------------------+         \
3507  *     | ELF header           | ET_CORE  |
3508  *     +----------------------+          |
3509  *     | ELF program headers  |          |--- headers
3510  *     | - NOTE section       |          |
3511  *     | - PT_LOAD sections   |          |
3512  *     +----------------------+         /
3513  *     | NOTEs:               |
3514  *     | - NT_PRSTATUS        |
3515  *     | - NT_PRPSINFO        |
3516  *     | - NT_AUXV            |
3517  *     +----------------------+ <-- aligned to target page
3518  *     | Process memory dump  |
3519  *     :                      :
3520  *     .                      .
3521  *     :                      :
3522  *     |                      |
3523  *     +----------------------+
3524  *
3525  * NT_PRSTATUS -> struct elf_prstatus (per thread)
3526  * NT_PRPSINFO -> struct elf_prpsinfo
3527  * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
3528  *
3529  * The format follows the System V format as closely as possible.
3530  * Current limitations are as follows:
3531  *     - no floating point registers are dumped
3532  *
3533  * Returns 0 on success, negative errno otherwise.
3534  *
3535  * TODO: make this work at runtime too: it should be possible to
3536  * force a coredump from a running process and then continue
3537  * processing.  For example, qemu could set up a SIGUSR2 handler
3538  * (provided that the target process hasn't registered a handler
3539  * for it) that does the dump when the signal is received.
3540  */
3541 static int elf_core_dump(int signr, const CPUArchState *env)
3542 {
3543     const CPUState *cpu = env_cpu((CPUArchState *)env);
3544     const TaskState *ts = (const TaskState *)cpu->opaque;
3545     struct vm_area_struct *vma = NULL;
3546     char corefile[PATH_MAX];
3547     struct elf_note_info info;
3548     struct elfhdr elf;
3549     struct elf_phdr phdr;
3550     struct rlimit dumpsize;
3551     struct mm_struct *mm = NULL;
3552     off_t offset = 0, data_offset = 0;
3553     int segs = 0;
3554     int fd = -1;
3555 
3556     init_note_info(&info);
3557 
3558     errno = 0;
3559     getrlimit(RLIMIT_CORE, &dumpsize);
3560     if (dumpsize.rlim_cur == 0)
3561         return 0;
3562 
3563     if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
3564         return (-errno);
3565 
3566     if ((fd = open(corefile, O_WRONLY | O_CREAT,
3567                    S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
3568         return (-errno);
3569 
3570     /*
3571      * Walk through the target process memory mappings and
3572      * set up a structure containing this information.  After
3573      * this point the vma_xxx functions can be used.
3574      */
3575     if ((mm = vma_init()) == NULL)
3576         goto out;
3577 
3578     walk_memory_regions(mm, vma_walker);
3579     segs = vma_get_mapping_count(mm);
3580 
3581     /*
3582      * Construct a valid coredump ELF header.  We also
3583      * add one more segment for the notes.
3584      */
3585     fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
3586     if (dump_write(fd, &elf, sizeof (elf)) != 0)
3587         goto out;
3588 
3589     /* fill in the in-memory version of notes */
3590     if (fill_note_info(&info, signr, env) < 0)
3591         goto out;
3592 
3593     offset += sizeof (elf);                             /* elf header */
3594     offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
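    /*
     * Worked example (illustrative, assuming a 64-bit ELF target):
     * with 10 mappings, segs + 1 = 11 and offset is now
     * 64 (ehdr) + 11 * 56 (phdr) = 680.  The notes are written there,
     * and the PT_LOAD data then starts at the next ELF_EXEC_PAGESIZE
     * boundary after them.
     */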
3595 
3596     /* write out notes program header */
3597     fill_elf_note_phdr(&phdr, info.notes_size, offset);
3598 
3599     offset += info.notes_size;
3600     if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
3601         goto out;
3602 
3603     /*
3604      * The ELF specification wants data to start at a page
3605      * boundary, so we align it here.
3606      */
3607     data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
3608 
3609     /*
3610      * Write program headers for memory regions mapped in
3611      * the target process.
3612      */
3613     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
3614         (void) memset(&phdr, 0, sizeof (phdr));
3615 
3616         phdr.p_type = PT_LOAD;
3617         phdr.p_offset = offset;
3618         phdr.p_vaddr = vma->vma_start;
3619         phdr.p_paddr = 0;
3620         phdr.p_filesz = vma_dump_size(vma);
3621         offset += phdr.p_filesz;
3622         phdr.p_memsz = vma->vma_end - vma->vma_start;
3623         phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
3624         if (vma->vma_flags & PROT_WRITE)
3625             phdr.p_flags |= PF_W;
3626         if (vma->vma_flags & PROT_EXEC)
3627             phdr.p_flags |= PF_X;
3628         phdr.p_align = ELF_EXEC_PAGESIZE;
3629 
3630         bswap_phdr(&phdr, 1);
3631         if (dump_write(fd, &phdr, sizeof(phdr)) != 0) {
3632             goto out;
3633         }
3634     }
3635 
3636     /*
3637      * Next we write the notes, just after the program headers.
3638      * No alignment is needed here.
3639      */
3640     if (write_note_info(&info, fd) < 0)
3641         goto out;
3642 
3643     /* align data to page boundary */
3644     if (lseek(fd, data_offset, SEEK_SET) != data_offset)
3645         goto out;
3646 
3647     /*
3648      * Finally, we can dump the process memory into the corefile.
3649      */
3650     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
3651         abi_ulong addr;
3652         abi_ulong end;
3653 
3654         end = vma->vma_start + vma_dump_size(vma);
3655 
3656         for (addr = vma->vma_start; addr < end;
3657              addr += TARGET_PAGE_SIZE) {
3658             char page[TARGET_PAGE_SIZE];
3659             int error;
3660 
3661             /*
3662              * Read in a page from target process memory and
3663              * write it to the coredump file.
3664              */
3665             error = copy_from_user(page, addr, sizeof (page));
3666             if (error != 0) {
3667                 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
3668                                addr);
3669                 errno = -error;
3670                 goto out;
3671             }
3672             if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
3673                 goto out;
3674         }
3675     }
3676 
3677  out:
3678     free_note_info(&info);
3679     if (mm != NULL)
3680         vma_delete(mm);
3681     (void) close(fd);
3682 
3683     if (errno != 0)
3684         return (-errno);
3685     return (0);
3686 }
3687 #endif /* USE_ELF_CORE_DUMP */
3688 
3689 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
3690 {
3691     init_thread(regs, infop);
3692 }
3693