/* This is the Linux kernel elf-loading code, ported into user space */
#include "qemu/osdep.h"
#include <sys/param.h>

#include <sys/resource.h>
#include <sys/shm.h>

#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "disas/disas.h"
#include "qemu/bitops.h"
#include "qemu/path.h"
#include "qemu/queue.h"
#include "qemu/guest-random.h"
#include "qemu/units.h"
#include "qemu/selfmap.h"
#include "qemu/lockable.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target_signal.h"
#include "accel/tcg/debuginfo.h"

#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_HWCAP2
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

#define ELF_OSABI   ELFOSABI_SYSV

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
    FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
                                           descriptors (signal handling) */
    MMAP_PAGE_ZERO =    0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC = 0x0400000,
    ADDR_LIMIT_32BIT =  0x0800000,
    SHORT_INODE =       0x1000000,
    WHOLE_SECONDS =     0x2000000,
    STICKY_TIMEOUTS =   0x4000000,
    ADDR_LIMIT_3GB =    0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX =         0x0000,
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
    PER_BSD =           0x0006,
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32 =       0x0008,
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
    PER_RISCOS =        0x000c,
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
    PER_HPUX =          0x0010,
    PER_MASK =          0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)

int info_is_fdpic(struct image_info *info)
{
    return info->personality == PER_LINUX_FDPIC;
}
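
/*
 * Illustrative sketch (not from the original file): personality() strips
 * the bug-emulation flags, while info_is_fdpic() deliberately compares the
 * unmasked value so the FDPIC_FUNCPTRS flag itself is taken into account.
 */
#if 0
assert(personality(PER_LINUX_FDPIC) == PER_LINUX);   /* flags masked off */
assert(personality(PER_LINUX32_3GB) == PER_LINUX32); /* base type kept */
#endif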

/* this flag is ineffective under Linux too, should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#if TARGET_BIG_ENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif

#ifdef TARGET_ABI_MIPSN32
typedef abi_ullong      target_elf_greg_t;
#define tswapreg(ptr)   tswap64(ptr)
#else
typedef abi_ulong       target_elf_greg_t;
#define tswapreg(ptr)   tswapal(ptr)
#endif
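
/*
 * A minimal sketch of what tswapreg provides (assumption: a guest whose
 * endianness differs from the host's): tswapal/tswap64 byte-swap the value
 * only when host and guest byte order differ, so register values written
 * into a core dump end up in guest byte order.
 */
#if 0
target_elf_greg_t in_dump = tswapreg(0x11223344);
/* bytes reversed here iff host and guest endianness differ */
#endif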

#ifdef USE_UID16
typedef abi_ushort      target_uid_t;
typedef abi_ushort      target_gid_t;
#else
typedef abi_uint        target_uid_t;
typedef abi_uint        target_gid_t;
#endif
typedef abi_int         target_pid_t;

#ifdef TARGET_I386

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}

#ifdef TARGET_X86_64
#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_X86_64

#define ELF_PLATFORM   "x86_64"

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#define ELF_NREG    27
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29, as there should be room for the
 * TRAPNO and ERR "registers" as well, but Linux doesn't dump those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[15]);
    (*regs)[1] = tswapreg(env->regs[14]);
    (*regs)[2] = tswapreg(env->regs[13]);
    (*regs)[3] = tswapreg(env->regs[12]);
    (*regs)[4] = tswapreg(env->regs[R_EBP]);
    (*regs)[5] = tswapreg(env->regs[R_EBX]);
    (*regs)[6] = tswapreg(env->regs[11]);
    (*regs)[7] = tswapreg(env->regs[10]);
    (*regs)[8] = tswapreg(env->regs[9]);
    (*regs)[9] = tswapreg(env->regs[8]);
    (*regs)[10] = tswapreg(env->regs[R_EAX]);
    (*regs)[11] = tswapreg(env->regs[R_ECX]);
    (*regs)[12] = tswapreg(env->regs[R_EDX]);
    (*regs)[13] = tswapreg(env->regs[R_ESI]);
    (*regs)[14] = tswapreg(env->regs[R_EDI]);
    (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[16] = tswapreg(env->eip);
    (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[18] = tswapreg(env->eflags);
    (*regs)[19] = tswapreg(env->regs[R_ESP]);
    (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff);
    (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
}

#if ULONG_MAX > UINT32_MAX
#define INIT_GUEST_COMMPAGE
static bool init_guest_commpage(void)
{
    /*
     * The vsyscall page is at a high negative address aka kernel space,
     * which means that we cannot actually allocate it with target_mmap.
     * We should still be able to use page_set_flags, unless the user
     * has specified -R reserved_va, which would trigger an assert().
     */
    if (reserved_va != 0 &&
        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
        error_report("Cannot allocate vsyscall page");
        exit(EXIT_FAILURE);
    }
    page_set_flags(TARGET_VSYSCALL_PAGE,
                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                   PAGE_EXEC | PAGE_VALID);
    return true;
}
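
/*
 * For context (assumption based on the usual x86_64 memory layout):
 * TARGET_VSYSCALL_PAGE is the fixed legacy vsyscall address
 * 0xffffffffff600000, well above any user mapping, which is why it is
 * registered with page_set_flags rather than mapped via target_mmap.
 */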
#endif
#else

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_386

#define ELF_PLATFORM get_elf_platform()
#define EXSTACK_DEFAULT true

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6) {
        family = 6;
    }
    if (family >= 3) {
        elf_platform[1] = '0' + family;
    }
    return elf_platform;
}
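
/*
 * Resulting strings, for reference: families 3, 4 and 5 yield "i386",
 * "i486" and "i586"; family 6 and above is clamped to "i686"; families
 * below 3 keep the default "i386".
 */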

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 means we have no such handler.  */
    regs->edx = 0;
}

#define ELF_NREG    17
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19, as there should be room for the
 * TRAPNO and ERR "registers" as well, but Linux doesn't dump those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[R_EBX]);
    (*regs)[1] = tswapreg(env->regs[R_ECX]);
    (*regs)[2] = tswapreg(env->regs[R_EDX]);
    (*regs)[3] = tswapreg(env->regs[R_ESI]);
    (*regs)[4] = tswapreg(env->regs[R_EDI]);
    (*regs)[5] = tswapreg(env->regs[R_EBP]);
    (*regs)[6] = tswapreg(env->regs[R_EAX]);
    (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[12] = tswapreg(env->eip);
    (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[14] = tswapreg(env->eflags);
    (*regs)[15] = tswapreg(env->regs[R_ESP]);
    (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff);
}
#endif

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_ARM

#ifndef TARGET_AARCH64
/* 32 bit ARM definitions */

#define ELF_ARCH        EM_ARM
#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->uregs[16] = ARM_CPU_MODE_USR;
    if (infop->entry & 1) {
        regs->uregs[16] |= CPSR_T;
    }
    regs->uregs[15] = infop->entry & 0xfffffffe;
    regs->uregs[13] = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->uregs[2], stack + 8); /* envp */
    get_user_ual(regs->uregs[1], stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed afterwards anyway! */
    regs->uregs[0] = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care?) */
    regs->uregs[10] = infop->start_data;

    /* Support ARM FDPIC.  */
    if (info_is_fdpic(infop)) {
        /* As described in the ABI document, r7 points to the loadmap info
         * prepared by the kernel. If an interpreter is needed, r8 points
         * to the interpreter loadmap and r9 points to the interpreter
         * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and
         * r9 points to the main program PT_DYNAMIC info.
         */
        regs->uregs[7] = infop->loadmap_addr;
        if (infop->interpreter_loadmap_addr) {
            /* Executable is dynamically loaded.  */
            regs->uregs[8] = infop->interpreter_loadmap_addr;
            regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
        } else {
            regs->uregs[8] = 0;
            regs->uregs[9] = infop->pt_dynamic_addr;
        }
    }
}
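
/*
 * For context, a sketch of the loadmap that r7/r8 point at, following the
 * kernel's <uapi/linux/elf-fdpic.h> (reproduced here as an illustration,
 * not used by this file):
 */
#if 0
struct elf32_fdpic_loadseg {
    Elf32_Addr addr;    /* core address to which mapped */
    Elf32_Addr p_vaddr; /* VMA recorded in the program header */
    Elf32_Word p_memsz; /* allocation size recorded in the program header */
};
struct elf32_fdpic_loadmap {
    Elf32_Half version; /* version of these structures */
    Elf32_Half nsegs;   /* number of segments */
    struct elf32_fdpic_loadseg segs[];
};
#endif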

#define ELF_NREG    18
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
{
    (*regs)[0] = tswapreg(env->regs[0]);
    (*regs)[1] = tswapreg(env->regs[1]);
    (*regs)[2] = tswapreg(env->regs[2]);
    (*regs)[3] = tswapreg(env->regs[3]);
    (*regs)[4] = tswapreg(env->regs[4]);
    (*regs)[5] = tswapreg(env->regs[5]);
    (*regs)[6] = tswapreg(env->regs[6]);
    (*regs)[7] = tswapreg(env->regs[7]);
    (*regs)[8] = tswapreg(env->regs[8]);
    (*regs)[9] = tswapreg(env->regs[9]);
    (*regs)[10] = tswapreg(env->regs[10]);
    (*regs)[11] = tswapreg(env->regs[11]);
    (*regs)[12] = tswapreg(env->regs[12]);
    (*regs)[13] = tswapreg(env->regs[13]);
    (*regs)[14] = tswapreg(env->regs[14]);
    (*regs)[15] = tswapreg(env->regs[15]);

    (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
    (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
    ARM_HWCAP_ARM_NEON      = 1 << 12,
    ARM_HWCAP_ARM_VFPv3     = 1 << 13,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
    ARM_HWCAP_ARM_TLS       = 1 << 15,
    ARM_HWCAP_ARM_VFPv4     = 1 << 16,
    ARM_HWCAP_ARM_IDIVA     = 1 << 17,
    ARM_HWCAP_ARM_IDIVT     = 1 << 18,
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
    ARM_HWCAP_ARM_FPHP      = 1 << 22,
    ARM_HWCAP_ARM_ASIMDHP   = 1 << 23,
    ARM_HWCAP_ARM_ASIMDDP   = 1 << 24,
    ARM_HWCAP_ARM_ASIMDFHM  = 1 << 25,
    ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26,
    ARM_HWCAP_ARM_I8MM      = 1 << 27,
};

enum {
    ARM_HWCAP2_ARM_AES      = 1 << 0,
    ARM_HWCAP2_ARM_PMULL    = 1 << 1,
    ARM_HWCAP2_ARM_SHA1     = 1 << 2,
    ARM_HWCAP2_ARM_SHA2     = 1 << 3,
    ARM_HWCAP2_ARM_CRC32    = 1 << 4,
    ARM_HWCAP2_ARM_SB       = 1 << 5,
    ARM_HWCAP2_ARM_SSBS     = 1 << 6,
};

/* The commpage only exists for 32-bit kernels */

#define HI_COMMPAGE (intptr_t)0xffff0f00u

static bool init_guest_commpage(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    abi_ptr commpage;
    void *want;
    void *addr;

    /*
     * M-profile allocates a maximum of 2GB of address space, so it can
     * never allocate the commpage.  Skip it.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        return true;
    }

    commpage = HI_COMMPAGE & -qemu_host_page_size;
    want = g2h_untagged(commpage);
    addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

    if (addr == MAP_FAILED) {
        perror("Allocating guest commpage");
        exit(EXIT_FAILURE);
    }
    if (addr != want) {
        return false;
    }

    /* Set kernel helper versions; rest of page is 0.  */
    __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));

    if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
        perror("Protecting guest commpage");
        exit(EXIT_FAILURE);
    }

    page_set_flags(commpage, commpage | ~qemu_host_page_mask,
                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
    return true;
}
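
/*
 * Guest-side usage sketch (assumption: the standard ARM kuser helper
 * layout): a 32-bit guest can read back the version word that __put_user
 * stored at the top of the commpage above.
 */
#if 0
static inline int kuser_helper_version(void)
{
    return *(volatile int *)0xffff0ffcu; /* 5, as written above */
}
#endif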

#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_ARM_SWP;
    hwcaps |= ARM_HWCAP_ARM_HALF;
    hwcaps |= ARM_HWCAP_ARM_THUMB;
    hwcaps |= ARM_HWCAP_ARM_FAST_MULT;

    /* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
    GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
    GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
    GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
    GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
    GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
    GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
    GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
    GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
    GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);

    if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
        cpu_isar_feature(aa32_fpdp_v3, cpu)) {
        hwcaps |= ARM_HWCAP_ARM_VFPv3;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            hwcaps |= ARM_HWCAP_ARM_VFPD32;
        } else {
            hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
        }
    }
    GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
    /*
     * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same
     * isar_feature function for both. The kernel reports them as two hwcaps.
     */
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP);
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP);
    GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP);
    GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM);
    GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16);
    GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM);

    return hwcaps;
}

uint32_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
    GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB);
    GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS);
    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP_ARM_SWP      )] = "swp",
    [__builtin_ctz(ARM_HWCAP_ARM_HALF     )] = "half",
    [__builtin_ctz(ARM_HWCAP_ARM_THUMB    )] = "thumb",
    [__builtin_ctz(ARM_HWCAP_ARM_26BIT    )] = "26bit",
    [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult",
    [__builtin_ctz(ARM_HWCAP_ARM_FPA      )] = "fpa",
    [__builtin_ctz(ARM_HWCAP_ARM_VFP      )] = "vfp",
    [__builtin_ctz(ARM_HWCAP_ARM_EDSP     )] = "edsp",
    [__builtin_ctz(ARM_HWCAP_ARM_JAVA     )] = "java",
    [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT   )] = "iwmmxt",
    [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH   )] = "crunch",
    [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE  )] = "thumbee",
    [__builtin_ctz(ARM_HWCAP_ARM_NEON     )] = "neon",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPv3    )] = "vfpv3",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16",
    [__builtin_ctz(ARM_HWCAP_ARM_TLS      )] = "tls",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPv4    )] = "vfpv4",
    [__builtin_ctz(ARM_HWCAP_ARM_IDIVA    )] = "idiva",
    [__builtin_ctz(ARM_HWCAP_ARM_IDIVT    )] = "idivt",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPD32   )] = "vfpd32",
    [__builtin_ctz(ARM_HWCAP_ARM_LPAE     )] = "lpae",
    [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM  )] = "evtstrm",
    [__builtin_ctz(ARM_HWCAP_ARM_FPHP     )] = "fphp",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP  )] = "asimdhp",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP  )] = "asimddp",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16",
    [__builtin_ctz(ARM_HWCAP_ARM_I8MM     )] = "i8mm",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP2_ARM_AES  )] = "aes",
    [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull",
    [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1",
    [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2",
    [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32",
    [__builtin_ctz(ARM_HWCAP2_ARM_SB   )] = "sb",
    [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}
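
/*
 * Usage sketch (hypothetical caller, not part of this file): building a
 * /proc/cpuinfo style "Features" line from the tables above.
 */
#if 0
static void print_hwcap_names(FILE *f)
{
    uint32_t hwcap = get_elf_hwcap();
    for (uint32_t bit = 0; bit < 32; bit++) {
        if (hwcap & (1u << bit)) {
            const char *name = elf_hwcap_str(bit);
            if (name) {
                fprintf(f, "%s ", name);
            }
        }
    }
}
#endif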

#undef GET_FEATURE
#undef GET_FEATURE_ID

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    CPUARMState *env = cpu_env(thread_cpu);

#if TARGET_BIG_ENDIAN
# define END  "b"
#else
# define END  "l"
#endif

    if (arm_feature(env, ARM_FEATURE_V8)) {
        return "v8" END;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            return "v7m" END;
        } else {
            return "v7" END;
        }
    } else if (arm_feature(env, ARM_FEATURE_V6)) {
        return "v6" END;
    } else if (arm_feature(env, ARM_FEATURE_V5)) {
        return "v5" END;
    } else {
        return "v4" END;
    }

#undef END
}

#else
/* 64 bit ARM definitions */

#define ELF_ARCH        EM_AARCH64
#define ELF_CLASS       ELFCLASS64
#if TARGET_BIG_ENDIAN
# define ELF_PLATFORM    "aarch64_be"
#else
# define ELF_PLATFORM    "aarch64"
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->pc = infop->entry & ~0x3ULL;
    regs->sp = stack;
}

#define ELF_NREG    34
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUARMState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(env->xregs[i]);
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_A64_FP            = 1 << 0,
    ARM_HWCAP_A64_ASIMD         = 1 << 1,
    ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
    ARM_HWCAP_A64_AES           = 1 << 3,
    ARM_HWCAP_A64_PMULL         = 1 << 4,
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
    ARM_HWCAP_A64_ASIMDFHM      = 1 << 23,
    ARM_HWCAP_A64_DIT           = 1 << 24,
    ARM_HWCAP_A64_USCAT         = 1 << 25,
    ARM_HWCAP_A64_ILRCPC        = 1 << 26,
    ARM_HWCAP_A64_FLAGM         = 1 << 27,
    ARM_HWCAP_A64_SSBS          = 1 << 28,
    ARM_HWCAP_A64_SB            = 1 << 29,
    ARM_HWCAP_A64_PACA          = 1 << 30,
    ARM_HWCAP_A64_PACG          = 1UL << 31,

    ARM_HWCAP2_A64_DCPODP       = 1 << 0,
    ARM_HWCAP2_A64_SVE2         = 1 << 1,
    ARM_HWCAP2_A64_SVEAES       = 1 << 2,
    ARM_HWCAP2_A64_SVEPMULL     = 1 << 3,
    ARM_HWCAP2_A64_SVEBITPERM   = 1 << 4,
    ARM_HWCAP2_A64_SVESHA3      = 1 << 5,
    ARM_HWCAP2_A64_SVESM4       = 1 << 6,
    ARM_HWCAP2_A64_FLAGM2       = 1 << 7,
    ARM_HWCAP2_A64_FRINT        = 1 << 8,
    ARM_HWCAP2_A64_SVEI8MM      = 1 << 9,
    ARM_HWCAP2_A64_SVEF32MM     = 1 << 10,
    ARM_HWCAP2_A64_SVEF64MM     = 1 << 11,
    ARM_HWCAP2_A64_SVEBF16      = 1 << 12,
    ARM_HWCAP2_A64_I8MM         = 1 << 13,
    ARM_HWCAP2_A64_BF16         = 1 << 14,
    ARM_HWCAP2_A64_DGH          = 1 << 15,
    ARM_HWCAP2_A64_RNG          = 1 << 16,
    ARM_HWCAP2_A64_BTI          = 1 << 17,
    ARM_HWCAP2_A64_MTE          = 1 << 18,
    ARM_HWCAP2_A64_ECV          = 1 << 19,
    ARM_HWCAP2_A64_AFP          = 1 << 20,
    ARM_HWCAP2_A64_RPRES        = 1 << 21,
    ARM_HWCAP2_A64_MTE3         = 1 << 22,
    ARM_HWCAP2_A64_SME          = 1 << 23,
    ARM_HWCAP2_A64_SME_I16I64   = 1 << 24,
    ARM_HWCAP2_A64_SME_F64F64   = 1 << 25,
    ARM_HWCAP2_A64_SME_I8I32    = 1 << 26,
    ARM_HWCAP2_A64_SME_F16F32   = 1 << 27,
    ARM_HWCAP2_A64_SME_B16F32   = 1 << 28,
    ARM_HWCAP2_A64_SME_F32F32   = 1 << 29,
    ARM_HWCAP2_A64_SME_FA64     = 1 << 30,
    ARM_HWCAP2_A64_WFXT         = 1ULL << 31,
    ARM_HWCAP2_A64_EBF16        = 1ULL << 32,
    ARM_HWCAP2_A64_SVE_EBF16    = 1ULL << 33,
    ARM_HWCAP2_A64_CSSC         = 1ULL << 34,
    ARM_HWCAP2_A64_RPRFM        = 1ULL << 35,
    ARM_HWCAP2_A64_SVE2P1       = 1ULL << 36,
    ARM_HWCAP2_A64_SME2         = 1ULL << 37,
    ARM_HWCAP2_A64_SME2P1       = 1ULL << 38,
    ARM_HWCAP2_A64_SME_I16I32   = 1ULL << 39,
    ARM_HWCAP2_A64_SME_BI32I32  = 1ULL << 40,
    ARM_HWCAP2_A64_SME_B16B16   = 1ULL << 41,
    ARM_HWCAP2_A64_SME_F16F16   = 1ULL << 42,
    ARM_HWCAP2_A64_MOPS         = 1ULL << 43,
    ARM_HWCAP2_A64_HBC          = 1ULL << 44,
};

#define ELF_HWCAP   get_elf_hwcap()
#define ELF_HWCAP2  get_elf_hwcap2()

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_A64_FP;
    hwcaps |= ARM_HWCAP_A64_ASIMD;
    hwcaps |= ARM_HWCAP_A64_CPUID;

    /* probe for the extra features */

    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
    GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
    GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
    GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
    GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
    GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
    GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
    GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
    GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);

    return hwcaps;
}

/*
 * The HWCAP2 bits above 31 need a 64-bit accumulator; a uint32_t would
 * silently truncate EBF16 and later flags.
 */
uint64_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint64_t hwcaps = 0;

    GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
    GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
    GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
    GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
    GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
    GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
    GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
    GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
    GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
    GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
    GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
    GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
    GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
    GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
    GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
    GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
    GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
    GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
    GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
                              ARM_HWCAP2_A64_SME_F32F32 |
                              ARM_HWCAP2_A64_SME_B16F32 |
                              ARM_HWCAP2_A64_SME_F16F32 |
                              ARM_HWCAP2_A64_SME_I8I32));
    GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
    GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
    GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
    GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC);
    GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS);

    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP_A64_FP      )] = "fp",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMD   )] = "asimd",
    [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
    [__builtin_ctz(ARM_HWCAP_A64_AES     )] = "aes",
    [__builtin_ctz(ARM_HWCAP_A64_PMULL   )] = "pmull",
    [__builtin_ctz(ARM_HWCAP_A64_SHA1    )] = "sha1",
    [__builtin_ctz(ARM_HWCAP_A64_SHA2    )] = "sha2",
    [__builtin_ctz(ARM_HWCAP_A64_CRC32   )] = "crc32",
    [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics",
    [__builtin_ctz(ARM_HWCAP_A64_FPHP    )] = "fphp",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp",
    [__builtin_ctz(ARM_HWCAP_A64_CPUID   )] = "cpuid",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm",
    [__builtin_ctz(ARM_HWCAP_A64_JSCVT   )] = "jscvt",
    [__builtin_ctz(ARM_HWCAP_A64_FCMA    )] = "fcma",
    [__builtin_ctz(ARM_HWCAP_A64_LRCPC   )] = "lrcpc",
    [__builtin_ctz(ARM_HWCAP_A64_DCPOP   )] = "dcpop",
    [__builtin_ctz(ARM_HWCAP_A64_SHA3    )] = "sha3",
    [__builtin_ctz(ARM_HWCAP_A64_SM3     )] = "sm3",
    [__builtin_ctz(ARM_HWCAP_A64_SM4     )] = "sm4",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp",
    [__builtin_ctz(ARM_HWCAP_A64_SHA512  )] = "sha512",
    [__builtin_ctz(ARM_HWCAP_A64_SVE     )] = "sve",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm",
    [__builtin_ctz(ARM_HWCAP_A64_DIT     )] = "dit",
    [__builtin_ctz(ARM_HWCAP_A64_USCAT   )] = "uscat",
    [__builtin_ctz(ARM_HWCAP_A64_ILRCPC  )] = "ilrcpc",
    [__builtin_ctz(ARM_HWCAP_A64_FLAGM   )] = "flagm",
    [__builtin_ctz(ARM_HWCAP_A64_SSBS    )] = "ssbs",
    [__builtin_ctz(ARM_HWCAP_A64_SB      )] = "sb",
    [__builtin_ctz(ARM_HWCAP_A64_PACA    )] = "paca",
    [__builtin_ctz(ARM_HWCAP_A64_PACG    )] = "pacg",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP2_A64_DCPODP       )] = "dcpodp",
    [__builtin_ctz(ARM_HWCAP2_A64_SVE2         )] = "sve2",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEAES       )] = "sveaes",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL     )] = "svepmull",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM   )] = "svebitperm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3      )] = "svesha3",
    [__builtin_ctz(ARM_HWCAP2_A64_SVESM4       )] = "svesm4",
    [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2       )] = "flagm2",
    [__builtin_ctz(ARM_HWCAP2_A64_FRINT        )] = "frint",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM      )] = "svei8mm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM     )] = "svef32mm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM     )] = "svef64mm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16      )] = "svebf16",
    [__builtin_ctz(ARM_HWCAP2_A64_I8MM         )] = "i8mm",
    [__builtin_ctz(ARM_HWCAP2_A64_BF16         )] = "bf16",
    [__builtin_ctz(ARM_HWCAP2_A64_DGH          )] = "dgh",
    [__builtin_ctz(ARM_HWCAP2_A64_RNG          )] = "rng",
    [__builtin_ctz(ARM_HWCAP2_A64_BTI          )] = "bti",
    [__builtin_ctz(ARM_HWCAP2_A64_MTE          )] = "mte",
    [__builtin_ctz(ARM_HWCAP2_A64_ECV          )] = "ecv",
    [__builtin_ctz(ARM_HWCAP2_A64_AFP          )] = "afp",
    [__builtin_ctz(ARM_HWCAP2_A64_RPRES        )] = "rpres",
    [__builtin_ctz(ARM_HWCAP2_A64_MTE3         )] = "mte3",
    [__builtin_ctz(ARM_HWCAP2_A64_SME          )] = "sme",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64   )] = "smei16i64",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64   )] = "smef64f64",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32    )] = "smei8i32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32   )] = "smef16f32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32   )] = "smeb16f32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32   )] = "smef32f32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64     )] = "smefa64",
    [__builtin_ctz(ARM_HWCAP2_A64_WFXT         )] = "wfxt",
    [__builtin_ctzll(ARM_HWCAP2_A64_EBF16      )] = "ebf16",
    [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16  )] = "sveebf16",
    [__builtin_ctzll(ARM_HWCAP2_A64_CSSC       )] = "cssc",
    [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM      )] = "rprfm",
    [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1     )] = "sve2p1",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME2       )] = "sme2",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1     )] = "sme2p1",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
    [__builtin_ctzll(ARM_HWCAP2_A64_MOPS       )] = "mops",
    [__builtin_ctzll(ARM_HWCAP2_A64_HBC        )] = "hbc",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

#undef GET_FEATURE_ID

#endif /* not TARGET_AARCH64 */
#endif /* TARGET_ARM */

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_SPARCV9
#else
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV)
#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_SPARC
#endif /* TARGET_SPARC64 */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Note that target_cpu_copy_regs does not read psr/tstate. */
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = (infop->start_stack - 16 * sizeof(abi_ulong)
                        - TARGET_STACK_BIAS);
}
#endif /* TARGET_SPARC */

#ifdef TARGET_PPC

#define ELF_MACHINE    PPC_ELF_MACHINE

#if defined(TARGET_PPC64)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

#endif

#define ELF_ARCH        EM_PPC

/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.  */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,

    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,

    /* Feature definitions in AT_HWCAP2.  */
    QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
    QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
    QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
    QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
    QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
    QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
    QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
    QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
    QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
    QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
    QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
    QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
    QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
    QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */
    QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flags, feature) \
    do { \
        if ((cpu->env.insns_flags2 & flags) == flags) { \
            features |= feature; \
        } \
    } while (0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
    GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
    GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
    GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
                  PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
                  QEMU_PPC_FEATURE_ARCH_2_06);
#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap2(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flag, feature)                                      \
    do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)

    GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
    GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
    GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                  PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
                  QEMU_PPC_FEATURE2_VEC_CRYPTO);
    GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
                 QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128);
    GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 |
                 QEMU_PPC_FEATURE2_MMA);

#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
    do {                                                \
        PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);              \
        /*                                              \
         * Handle glibc compatibility: these magic entries must \
         * be at the lowest addresses in the final auxv.        \
         */                                             \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \
        NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
    } while (0)
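
/*
 * Sketch of the resulting low end of the auxv (assumption: NEW_AUX_ENT,
 * defined later in this file, appends one id/value pair per call):
 *
 *     AT_IGNOREPPC, AT_IGNOREPPC
 *     AT_IGNOREPPC, AT_IGNOREPPC   <- ensures the 32-bit value at the
 *                                     first 16-byte aligned slot is > 16
 *     AT_DCACHEBSIZE, <dcache line size>
 *     AT_ICACHEBSIZE, <icache line size>
 *     AT_UCACHEBSIZE, 0
 */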

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64)
    if (get_ppc64_abi(infop) < 2) {
        uint64_t val;
        get_user_u64(val, infop->entry + 8);
        _regs->gpr[2] = val + infop->load_bias;
        get_user_u64(val, infop->entry);
        infop->entry = val + infop->load_bias;
    } else {
        _regs->gpr[12] = infop->entry;  /* r12 set to global entry address */
    }
#endif
    _regs->nip = infop->entry;
}
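
/*
 * For context (assumption: the ELFv1 big-endian PPC64 ABI): for ABI
 * versions < 2, infop->entry initially points at a function descriptor
 * rather than at code, roughly:
 */
#if 0
struct ppc64_elfv1_func_desc {
    uint64_t entry; /* code address; becomes the new infop->entry/NIP */
    uint64_t toc;   /* TOC base, loaded into r2 above */
    uint64_t env;   /* environment pointer, unused here */
};
#endif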

/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
#define ELF_NREG 48
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapreg(env->gpr[i]);
    }

    (*regs)[32] = tswapreg(env->nip);
    (*regs)[33] = tswapreg(env->msr);
    (*regs)[35] = tswapreg(env->ctr);
    (*regs)[36] = tswapreg(env->lr);
    (*regs)[37] = tswapreg(cpu_read_xer(env));

    ccr = ppc_get_cr(env);
    (*regs)[38] = tswapreg(ccr);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_LOONGARCH64

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_LOONGARCH
#define EXSTACK_DEFAULT true

#define elf_check_arch(x) ((x) == EM_LOONGARCH)

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Set crmd PG,DA = 1,0 */
    regs->csr.crmd = 2 << 3;
    regs->csr.era = infop->entry;
    regs->regs[3] = infop->start_stack;
}

/* See linux kernel: arch/loongarch/include/asm/elf.h */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_EF_R0 = 0,
    TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33,
    TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPULoongArchState *env)
{
    int i;

    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]);
    }

    (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc);
    (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#define ELF_HWCAP get_elf_hwcap()

/* See arch/loongarch/include/uapi/asm/hwcap.h */
enum {
    HWCAP_LOONGARCH_CPUCFG   = (1 << 0),
    HWCAP_LOONGARCH_LAM      = (1 << 1),
    HWCAP_LOONGARCH_UAL      = (1 << 2),
    HWCAP_LOONGARCH_FPU      = (1 << 3),
    HWCAP_LOONGARCH_LSX      = (1 << 4),
    HWCAP_LOONGARCH_LASX     = (1 << 5),
    HWCAP_LOONGARCH_CRC32    = (1 << 6),
    HWCAP_LOONGARCH_COMPLEX  = (1 << 7),
    HWCAP_LOONGARCH_CRYPTO   = (1 << 8),
    HWCAP_LOONGARCH_LVZ      = (1 << 9),
    HWCAP_LOONGARCH_LBT_X86  = (1 << 10),
    HWCAP_LOONGARCH_LBT_ARM  = (1 << 11),
    HWCAP_LOONGARCH_LBT_MIPS = (1 << 12),
};

static uint32_t get_elf_hwcap(void)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= HWCAP_LOONGARCH_CRC32;

    if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) {
        hwcaps |= HWCAP_LOONGARCH_UAL;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) {
        hwcaps |= HWCAP_LOONGARCH_FPU;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) {
        hwcaps |= HWCAP_LOONGARCH_LAM;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        hwcaps |= HWCAP_LOONGARCH_LSX;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        hwcaps |= HWCAP_LOONGARCH_LASX;
    }

    return hwcaps;
}

#define ELF_PLATFORM "loongarch"

#endif /* TARGET_LOONGARCH64 */

#ifdef TARGET_MIPS

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#define ELF_ARCH    EM_MIPS
#define EXSTACK_DEFAULT true

#ifdef TARGET_ABI_MIPSN32
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
#else
#define elf_check_abi(x) (!((x) & EF_MIPS_ABI2))
#endif

#define ELF_BASE_PLATFORM get_elf_base_platform()

#define MATCH_PLATFORM_INSN(_flags, _base_platform)      \
    do { if ((cpu->env.insn_flags & (_flags)) == _flags) \
    { return _base_platform; } } while (0)

static const char *get_elf_base_platform(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);

    /* 64-bit ISAs go first */
    MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6");
    MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5");
    MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2");
    MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64");
    MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5");
    MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4");
    MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3");

    /* 32-bit ISAs */
    MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6");
    MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5");
    MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2");
    MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32");
    MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2");

    /* Fallback */
    return "mips";
}
#undef MATCH_PLATFORM_INSN
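
/*
 * Note (assumption about later code in this file): ELF_BASE_PLATFORM is
 * consumed by create_elf_tables to emit an AT_BASE_PLATFORM auxv entry,
 * which the guest can read back with getauxval(AT_BASE_PLATFORM).
 */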

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

/* See linux kernel: arch/mips/include/asm/elf.h.  */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/include/asm/reg.h.  */
enum {
#ifdef TARGET_MIPS64
    TARGET_EF_R0 = 0,
#else
    TARGET_EF_R0 = 6,
#endif
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
    TARGET_EF_LO = TARGET_EF_R0 + 32,
    TARGET_EF_HI = TARGET_EF_R0 + 33,
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
};

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
{
    int i;

    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
    }

    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

/* See arch/mips/include/uapi/asm/hwcap.h.  */
enum {
    HWCAP_MIPS_R6           = (1 << 0),
    HWCAP_MIPS_MSA          = (1 << 1),
    HWCAP_MIPS_CRC32        = (1 << 2),
    HWCAP_MIPS_MIPS16       = (1 << 3),
    HWCAP_MIPS_MDMX         = (1 << 4),
    HWCAP_MIPS_MIPS3D       = (1 << 5),
    HWCAP_MIPS_SMARTMIPS    = (1 << 6),
    HWCAP_MIPS_DSP          = (1 << 7),
    HWCAP_MIPS_DSP2         = (1 << 8),
    HWCAP_MIPS_DSP3         = (1 << 9),
    HWCAP_MIPS_MIPS16E2     = (1 << 10),
    HWCAP_LOONGSON_MMI      = (1 << 11),
    HWCAP_LOONGSON_EXT      = (1 << 12),
    HWCAP_LOONGSON_EXT2     = (1 << 13),
    HWCAP_LOONGSON_CPUCFG   = (1 << 14),
};

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE_INSN(_flag, _hwcap) \
    do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_SET(_reg, _mask, _hwcap) \
    do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap) \
    do { \
        if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) { \
            hwcaps |= _hwcap; \
        } \
    } while (0)
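
/*
 * Expansion sketch for the first probe below: GET_FEATURE_REG_EQU extracts
 * the Architecture Revision field from Config0, and a value of 2 denotes a
 * release 6 core.
 */
#if 0
if (extract32(cpu->env.CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH) == 2) {
    hwcaps |= HWCAP_MIPS_R6;
}
#endif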

static uint32_t get_elf_hwcap(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH,
                        2, HWCAP_MIPS_R6);
    GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA);
    GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI);
    GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT);

    return hwcaps;
}

#undef GET_FEATURE_REG_EQU
#undef GET_FEATURE_REG_SET
#undef GET_FEATURE_INSN

#endif /* TARGET_MIPS */

#ifdef TARGET_MICROBLAZE

#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_MICROBLAZE

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 38
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
{
    int i, pos = 0;

    for (i = 0; i < 32; i++) {
        (*regs)[pos++] = tswapreg(env->regs[i]);
    }

    (*regs)[pos++] = tswapreg(env->pc);
    (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env));
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->ear);
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->esr);
}

#endif /* TARGET_MICROBLAZE */
1451 
1452 #ifdef TARGET_NIOS2
1453 
1454 #define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2)
1455 
1456 #define ELF_CLASS   ELFCLASS32
1457 #define ELF_ARCH    EM_ALTERA_NIOS2
1458 
1459 static void init_thread(struct target_pt_regs *regs, struct image_info *infop)
1460 {
1461     regs->ea = infop->entry;
1462     regs->sp = infop->start_stack;
1463 }
1464 
1465 #define LO_COMMPAGE  TARGET_PAGE_SIZE
1466 
1467 static bool init_guest_commpage(void)
1468 {
1469     static const uint8_t kuser_page[4 + 2 * 64] = {
1470         /* __kuser_helper_version */
1471         [0x00] = 0x02, 0x00, 0x00, 0x00,
1472 
1473         /* __kuser_cmpxchg */
1474         [0x04] = 0x3a, 0x6c, 0x3b, 0x00,  /* trap 16 */
1475                  0x3a, 0x28, 0x00, 0xf8,  /* ret */
1476 
1477         /* __kuser_sigtramp */
1478         [0x44] = 0xc4, 0x22, 0x80, 0x00,  /* movi r2, __NR_rt_sigreturn */
1479                  0x3a, 0x68, 0x3b, 0x00,  /* trap 0 */
1480     };
1481 
1482     void *want = g2h_untagged(LO_COMMPAGE & -qemu_host_page_size);
1483     void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
1484                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
1485 
1486     if (addr == MAP_FAILED) {
1487         perror("Allocating guest commpage");
1488         exit(EXIT_FAILURE);
1489     }
1490     if (addr != want) {
1491         return false;
1492     }
1493 
1494     memcpy(addr, kuser_page, sizeof(kuser_page));
1495 
1496     if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
1497         perror("Protecting guest commpage");
1498         exit(EXIT_FAILURE);
1499     }
1500 
1501     page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
1502                    PAGE_READ | PAGE_EXEC | PAGE_VALID);
1503     return true;
1504 }
1505 
1506 #define ELF_EXEC_PAGESIZE        4096
1507 
1508 #define USE_ELF_CORE_DUMP
1509 #define ELF_NREG 49
1510 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1511 
1512 /* Layout follows the kernel's elf_dump_regs; see arch/mips/kernel/process.c.  */
1513 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1514                                const CPUNios2State *env)
1515 {
1516     int i;
1517 
1518     (*regs)[0] = -1;
1519     for (i = 1; i < 8; i++)    /* r0-r7 */
1520         (*regs)[i] = tswapreg(env->regs[i + 7]);
1521 
1522     for (i = 8; i < 16; i++)   /* r8-r15 */
1523         (*regs)[i] = tswapreg(env->regs[i - 8]);
1524 
1525     for (i = 16; i < 24; i++)  /* r16-r23 */
1526         (*regs)[i] = tswapreg(env->regs[i + 7]);
1527     (*regs)[24] = -1;    /* R_ET */
1528     (*regs)[25] = -1;    /* R_BT */
1529     (*regs)[26] = tswapreg(env->regs[R_GP]);
1530     (*regs)[27] = tswapreg(env->regs[R_SP]);
1531     (*regs)[28] = tswapreg(env->regs[R_FP]);
1532     (*regs)[29] = tswapreg(env->regs[R_EA]);
1533     (*regs)[30] = -1;    /* R_SSTATUS */
1534     (*regs)[31] = tswapreg(env->regs[R_RA]);
1535 
1536     (*regs)[32] = tswapreg(env->pc);
1537 
1538     (*regs)[33] = -1; /* R_STATUS */
1539     (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]);
1540 
1541     for (i = 35; i < 49; i++)    /* ... */
1542         (*regs)[i] = -1;
1543 }
1544 
1545 #endif /* TARGET_NIOS2 */
1546 
1547 #ifdef TARGET_OPENRISC
1548 
1549 #define ELF_ARCH EM_OPENRISC
1550 #define ELF_CLASS ELFCLASS32
1551 #define ELF_DATA  ELFDATA2MSB
1552 
1553 static inline void init_thread(struct target_pt_regs *regs,
1554                                struct image_info *infop)
1555 {
1556     regs->pc = infop->entry;
1557     regs->gpr[1] = infop->start_stack;
1558 }
1559 
1560 #define USE_ELF_CORE_DUMP
1561 #define ELF_EXEC_PAGESIZE 8192
1562 
1563 /* See linux kernel arch/openrisc/include/asm/elf.h.  */
1564 #define ELF_NREG 34 /* gprs and pc, sr */
1565 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1566 
1567 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1568                                const CPUOpenRISCState *env)
1569 {
1570     int i;
1571 
1572     for (i = 0; i < 32; i++) {
1573         (*regs)[i] = tswapreg(cpu_get_gpr(env, i));
1574     }
1575     (*regs)[32] = tswapreg(env->pc);
1576     (*regs)[33] = tswapreg(cpu_get_sr(env));
1577 }
1578 #define ELF_HWCAP 0
1579 #define ELF_PLATFORM NULL
1580 
1581 #endif /* TARGET_OPENRISC */
1582 
1583 #ifdef TARGET_SH4
1584 
1585 #define ELF_CLASS ELFCLASS32
1586 #define ELF_ARCH  EM_SH
1587 
1588 static inline void init_thread(struct target_pt_regs *regs,
1589                                struct image_info *infop)
1590 {
1591     /* TODO: check whether any other registers need initialization. */
1592     regs->pc = infop->entry;
1593     regs->regs[15] = infop->start_stack;
1594 }
1595 
1596 /* See linux kernel: arch/sh/include/asm/elf.h.  */
1597 #define ELF_NREG 23
1598 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1599 
1600 /* See linux kernel: arch/sh/include/asm/ptrace.h.  */
1601 enum {
1602     TARGET_REG_PC = 16,
1603     TARGET_REG_PR = 17,
1604     TARGET_REG_SR = 18,
1605     TARGET_REG_GBR = 19,
1606     TARGET_REG_MACH = 20,
1607     TARGET_REG_MACL = 21,
1608     TARGET_REG_SYSCALL = 22
1609 };
1610 
1611 static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
1612                                       const CPUSH4State *env)
1613 {
1614     int i;
1615 
1616     for (i = 0; i < 16; i++) {
1617         (*regs)[i] = tswapreg(env->gregs[i]);
1618     }
1619 
1620     (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
1621     (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
1622     (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
1623     (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
1624     (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
1625     (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
1626     (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
1627 }
1628 
1629 #define USE_ELF_CORE_DUMP
1630 #define ELF_EXEC_PAGESIZE        4096
1631 
1632 enum {
1633     SH_CPU_HAS_FPU            = 0x0001, /* Hardware FPU support */
1634     SH_CPU_HAS_P2_FLUSH_BUG   = 0x0002, /* Need to flush the cache in P2 area */
1635     SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
1636     SH_CPU_HAS_DSP            = 0x0008, /* SH-DSP: DSP support */
1637     SH_CPU_HAS_PERF_COUNTER   = 0x0010, /* Hardware performance counters */
1638     SH_CPU_HAS_PTEA           = 0x0020, /* PTEA register */
1639     SH_CPU_HAS_LLSC           = 0x0040, /* movli.l/movco.l */
1640     SH_CPU_HAS_L2_CACHE       = 0x0080, /* Secondary cache / URAM */
1641     SH_CPU_HAS_OP32           = 0x0100, /* 32-bit instruction support */
1642     SH_CPU_HAS_PTEAEX         = 0x0200, /* PTE ASID Extension support */
1643 };
1644 
1645 #define ELF_HWCAP get_elf_hwcap()
1646 
1647 static uint32_t get_elf_hwcap(void)
1648 {
1649     SuperHCPU *cpu = SUPERH_CPU(thread_cpu);
1650     uint32_t hwcap = 0;
1651 
1652     hwcap |= SH_CPU_HAS_FPU;
1653 
1654     if (cpu->env.features & SH_FEATURE_SH4A) {
1655         hwcap |= SH_CPU_HAS_LLSC;
1656     }
1657 
1658     return hwcap;
1659 }
1660 
1661 #endif
1662 
1663 #ifdef TARGET_CRIS
1664 
1665 #define ELF_CLASS ELFCLASS32
1666 #define ELF_ARCH  EM_CRIS
1667 
1668 static inline void init_thread(struct target_pt_regs *regs,
1669                                struct image_info *infop)
1670 {
1671     regs->erp = infop->entry;
1672 }
1673 
1674 #define ELF_EXEC_PAGESIZE        8192
1675 
1676 #endif
1677 
1678 #ifdef TARGET_M68K
1679 
1680 #define ELF_CLASS       ELFCLASS32
1681 #define ELF_ARCH        EM_68K
1682 
1683 /* ??? Does this need to do anything?
1684    #define ELF_PLAT_INIT(_r) */
1685 
1686 static inline void init_thread(struct target_pt_regs *regs,
1687                                struct image_info *infop)
1688 {
1689     regs->usp = infop->start_stack;
1690     regs->sr = 0;
1691     regs->pc = infop->entry;
1692 }
1693 
1694 /* See linux kernel: arch/m68k/include/asm/elf.h.  */
1695 #define ELF_NREG 20
1696 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1697 
1698 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
1699 {
1700     (*regs)[0] = tswapreg(env->dregs[1]);
1701     (*regs)[1] = tswapreg(env->dregs[2]);
1702     (*regs)[2] = tswapreg(env->dregs[3]);
1703     (*regs)[3] = tswapreg(env->dregs[4]);
1704     (*regs)[4] = tswapreg(env->dregs[5]);
1705     (*regs)[5] = tswapreg(env->dregs[6]);
1706     (*regs)[6] = tswapreg(env->dregs[7]);
1707     (*regs)[7] = tswapreg(env->aregs[0]);
1708     (*regs)[8] = tswapreg(env->aregs[1]);
1709     (*regs)[9] = tswapreg(env->aregs[2]);
1710     (*regs)[10] = tswapreg(env->aregs[3]);
1711     (*regs)[11] = tswapreg(env->aregs[4]);
1712     (*regs)[12] = tswapreg(env->aregs[5]);
1713     (*regs)[13] = tswapreg(env->aregs[6]);
1714     (*regs)[14] = tswapreg(env->dregs[0]);
1715     (*regs)[15] = tswapreg(env->aregs[7]);
1716     (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
1717     (*regs)[17] = tswapreg(env->sr);
1718     (*regs)[18] = tswapreg(env->pc);
1719     (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
1720 }
1721 
1722 #define USE_ELF_CORE_DUMP
1723 #define ELF_EXEC_PAGESIZE       8192
1724 
1725 #endif
1726 
1727 #ifdef TARGET_ALPHA
1728 
1729 #define ELF_CLASS      ELFCLASS64
1730 #define ELF_ARCH       EM_ALPHA
1731 
1732 static inline void init_thread(struct target_pt_regs *regs,
1733                                struct image_info *infop)
1734 {
1735     regs->pc = infop->entry;
1736     regs->ps = 8;
1737     regs->usp = infop->start_stack;
1738 }
1739 
1740 #define ELF_EXEC_PAGESIZE        8192
1741 
1742 #endif /* TARGET_ALPHA */
1743 
1744 #ifdef TARGET_S390X
1745 
1746 #define ELF_CLASS	ELFCLASS64
1747 #define ELF_DATA	ELFDATA2MSB
1748 #define ELF_ARCH	EM_S390
1749 
1750 #include "elf.h"
1751 
1752 #define ELF_HWCAP get_elf_hwcap()
1753 
1754 #define GET_FEATURE(_feat, _hwcap) \
1755     do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)
1756 
1757 uint32_t get_elf_hwcap(void)
1758 {
1759     /*
1760      * Let's assume we always have esan3 and zarch.
1761      * 31-bit processes can use 64-bit registers (high gprs).
1762      */
1763     uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS;
1764 
1765     GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE);
1766     GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA);
1767     GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP);
1768     GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM);
1769     if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) &&
1770         s390_has_feat(S390_FEAT_ETF3_ENH)) {
1771         hwcap |= HWCAP_S390_ETF3EH;
1772     }
1773     GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS);
1774     GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT);
1775     GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2);
1776 
1777     return hwcap;
1778 }
1779 
1780 const char *elf_hwcap_str(uint32_t bit)
1781 {
1782     static const char *hwcap_str[] = {
1783         [HWCAP_S390_NR_ESAN3]     = "esan3",
1784         [HWCAP_S390_NR_ZARCH]     = "zarch",
1785         [HWCAP_S390_NR_STFLE]     = "stfle",
1786         [HWCAP_S390_NR_MSA]       = "msa",
1787         [HWCAP_S390_NR_LDISP]     = "ldisp",
1788         [HWCAP_S390_NR_EIMM]      = "eimm",
1789         [HWCAP_S390_NR_DFP]       = "dfp",
1790         [HWCAP_S390_NR_HPAGE]     = "edat",
1791         [HWCAP_S390_NR_ETF3EH]    = "etf3eh",
1792         [HWCAP_S390_NR_HIGH_GPRS] = "highgprs",
1793         [HWCAP_S390_NR_TE]        = "te",
1794         [HWCAP_S390_NR_VXRS]      = "vx",
1795         [HWCAP_S390_NR_VXRS_BCD]  = "vxd",
1796         [HWCAP_S390_NR_VXRS_EXT]  = "vxe",
1797         [HWCAP_S390_NR_GS]        = "gs",
1798         [HWCAP_S390_NR_VXRS_EXT2] = "vxe2",
1799         [HWCAP_S390_NR_VXRS_PDE]  = "vxp",
1800         [HWCAP_S390_NR_SORT]      = "sort",
1801         [HWCAP_S390_NR_DFLT]      = "dflt",
1802         [HWCAP_S390_NR_NNPA]      = "nnpa",
1803         [HWCAP_S390_NR_PCI_MIO]   = "pcimio",
1804         [HWCAP_S390_NR_SIE]       = "sie",
1805     };
1806 
1807     return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
1808 }
1809 
1810 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
1811 {
1812     regs->psw.addr = infop->entry;
1813     regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
1814                      PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \
1815                      PSW_MASK_32;
1816     regs->gprs[15] = infop->start_stack;
1817 }
1818 
1819 /* See linux kernel: arch/s390/include/uapi/asm/ptrace.h (s390_regs).  */
1820 #define ELF_NREG 27
1821 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1822 
1823 enum {
1824     TARGET_REG_PSWM = 0,
1825     TARGET_REG_PSWA = 1,
1826     TARGET_REG_GPRS = 2,
1827     TARGET_REG_ARS = 18,
1828     TARGET_REG_ORIG_R2 = 26,
1829 };
1830 
1831 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1832                                const CPUS390XState *env)
1833 {
1834     int i;
1835     uint32_t *aregs;
1836 
1837     (*regs)[TARGET_REG_PSWM] = tswapreg(env->psw.mask);
1838     (*regs)[TARGET_REG_PSWA] = tswapreg(env->psw.addr);
1839     for (i = 0; i < 16; i++) {
1840         (*regs)[TARGET_REG_GPRS + i] = tswapreg(env->regs[i]);
1841     }
1842     aregs = (uint32_t *)&((*regs)[TARGET_REG_ARS]);
1843     for (i = 0; i < 16; i++) {
1844         aregs[i] = tswap32(env->aregs[i]);
1845     }
1846     (*regs)[TARGET_REG_ORIG_R2] = 0;
1847 }
1848 
1849 #define USE_ELF_CORE_DUMP
1850 #define ELF_EXEC_PAGESIZE 4096
1851 
1852 #endif /* TARGET_S390X */
1853 
1854 #ifdef TARGET_RISCV
1855 
1856 #define ELF_ARCH  EM_RISCV
1857 
1858 #ifdef TARGET_RISCV32
1859 #define ELF_CLASS ELFCLASS32
1860 #else
1861 #define ELF_CLASS ELFCLASS64
1862 #endif
1863 
1864 #define ELF_HWCAP get_elf_hwcap()
1865 
1866 static uint32_t get_elf_hwcap(void)
1867 {
1868 #define MISA_BIT(EXT) (1 << (EXT - 'A'))
1869     RISCVCPU *cpu = RISCV_CPU(thread_cpu);
1870     uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A')
1871                     | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C')
1872                     | MISA_BIT('V');
1873 
1874     return cpu->env.misa_ext & mask;
1875 #undef MISA_BIT
1876 }
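
/*
 * Worked example: MISA_BIT('C') is 1 << ('C' - 'A') = 1 << 2 = 0x4, the
 * same single-letter extension encoding that the misa CSR uses.  A CPU
 * implementing RV64IMAC therefore reports at least the I, M, A and C
 * bits here, which the guest receives through AT_HWCAP.
 */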
1877 
1878 static inline void init_thread(struct target_pt_regs *regs,
1879                                struct image_info *infop)
1880 {
1881     regs->sepc = infop->entry;
1882     regs->sp = infop->start_stack;
1883 }
1884 
1885 #define ELF_EXEC_PAGESIZE 4096
1886 
1887 #endif /* TARGET_RISCV */
1888 
1889 #ifdef TARGET_HPPA
1890 
1891 #define ELF_CLASS       ELFCLASS32
1892 #define ELF_ARCH        EM_PARISC
1893 #define ELF_PLATFORM    "PARISC"
1894 #define STACK_GROWS_DOWN 0
1895 #define STACK_ALIGNMENT  64
1896 
1897 static inline void init_thread(struct target_pt_regs *regs,
1898                                struct image_info *infop)
1899 {
1900     regs->iaoq[0] = infop->entry;
1901     regs->iaoq[1] = infop->entry + 4;
1902     regs->gr[23] = 0;
1903     regs->gr[24] = infop->argv;
1904     regs->gr[25] = infop->argc;
1905     /* The top-of-stack contains a linkage buffer.  */
1906     regs->gr[30] = infop->start_stack + 64;
1907     regs->gr[31] = infop->entry;
1908 }
1909 
1910 #define LO_COMMPAGE  0
1911 
1912 static bool init_guest_commpage(void)
1913 {
1914     void *want = g2h_untagged(LO_COMMPAGE);
1915     void *addr = mmap(want, qemu_host_page_size, PROT_NONE,
1916                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
1917 
1918     if (addr == MAP_FAILED) {
1919         perror("Allocating guest commpage");
1920         exit(EXIT_FAILURE);
1921     }
1922     if (addr != want) {
1923         return false;
1924     }
1925 
1926     /*
1927      * On Linux, page zero is normally marked execute only + gateway.
1928      * Normal read or write is supposed to fail (thus PROT_NONE above),
1929      * but specific offsets have kernel code mapped to raise permissions
1930      * and implement syscalls.  Here, simply mark the page executable.
1931      * Special case the entry points during translation (see do_page_zero).
1932      */
1933     page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
1934                    PAGE_EXEC | PAGE_VALID);
1935     return true;
1936 }
1937 
1938 #endif /* TARGET_HPPA */
1939 
1940 #ifdef TARGET_XTENSA
1941 
1942 #define ELF_CLASS       ELFCLASS32
1943 #define ELF_ARCH        EM_XTENSA
1944 
1945 static inline void init_thread(struct target_pt_regs *regs,
1946                                struct image_info *infop)
1947 {
1948     regs->windowbase = 0;
1949     regs->windowstart = 1;
1950     regs->areg[1] = infop->start_stack;
1951     regs->pc = infop->entry;
1952     if (info_is_fdpic(infop)) {
1953         regs->areg[4] = infop->loadmap_addr;
1954         regs->areg[5] = infop->interpreter_loadmap_addr;
1955         if (infop->interpreter_loadmap_addr) {
1956             regs->areg[6] = infop->interpreter_pt_dynamic_addr;
1957         } else {
1958             regs->areg[6] = infop->pt_dynamic_addr;
1959         }
1960     }
1961 }
1962 
1963 /* See linux kernel: arch/xtensa/include/asm/elf.h.  */
1964 #define ELF_NREG 128
1965 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1966 
1967 enum {
1968     TARGET_REG_PC,
1969     TARGET_REG_PS,
1970     TARGET_REG_LBEG,
1971     TARGET_REG_LEND,
1972     TARGET_REG_LCOUNT,
1973     TARGET_REG_SAR,
1974     TARGET_REG_WINDOWSTART,
1975     TARGET_REG_WINDOWBASE,
1976     TARGET_REG_THREADPTR,
1977     TARGET_REG_AR0 = 64,
1978 };
1979 
1980 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1981                                const CPUXtensaState *env)
1982 {
1983     unsigned i;
1984 
1985     (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
1986     (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM);
1987     (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]);
1988     (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]);
1989     (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]);
1990     (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]);
1991     (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]);
1992     (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]);
1993     (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]);
1994     xtensa_sync_phys_from_window((CPUXtensaState *)env);
1995     for (i = 0; i < env->config->nareg; ++i) {
1996         (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]);
1997     }
1998 }
1999 
2000 #define USE_ELF_CORE_DUMP
2001 #define ELF_EXEC_PAGESIZE       4096
2002 
2003 #endif /* TARGET_XTENSA */
2004 
2005 #ifdef TARGET_HEXAGON
2006 
2007 #define ELF_CLASS       ELFCLASS32
2008 #define ELF_ARCH        EM_HEXAGON
2009 
2010 static inline void init_thread(struct target_pt_regs *regs,
2011                                struct image_info *infop)
2012 {
2013     regs->sepc = infop->entry;
2014     regs->sp = infop->start_stack;
2015 }
2016 
2017 #endif /* TARGET_HEXAGON */
2018 
2019 #ifndef ELF_BASE_PLATFORM
2020 #define ELF_BASE_PLATFORM (NULL)
2021 #endif
2022 
2023 #ifndef ELF_PLATFORM
2024 #define ELF_PLATFORM (NULL)
2025 #endif
2026 
2027 #ifndef ELF_MACHINE
2028 #define ELF_MACHINE ELF_ARCH
2029 #endif
2030 
2031 #ifndef elf_check_arch
2032 #define elf_check_arch(x) ((x) == ELF_ARCH)
2033 #endif
2034 
2035 #ifndef elf_check_abi
2036 #define elf_check_abi(x) (1)
2037 #endif
2038 
2039 #ifndef ELF_HWCAP
2040 #define ELF_HWCAP 0
2041 #endif
2042 
2043 #ifndef STACK_GROWS_DOWN
2044 #define STACK_GROWS_DOWN 1
2045 #endif
2046 
2047 #ifndef STACK_ALIGNMENT
2048 #define STACK_ALIGNMENT 16
2049 #endif
2050 
2051 #ifdef TARGET_ABI32
2052 #undef ELF_CLASS
2053 #define ELF_CLASS ELFCLASS32
2054 #undef bswaptls
2055 #define bswaptls(ptr) bswap32s(ptr)
2056 #endif
2057 
2058 #ifndef EXSTACK_DEFAULT
2059 #define EXSTACK_DEFAULT false
2060 #endif
2061 
2062 #include "elf.h"
2063 
2064 /* We must delay the following stanzas until after "elf.h". */
2065 #if defined(TARGET_AARCH64)
2066 
2067 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz,
2068                                     const uint32_t *data,
2069                                     struct image_info *info,
2070                                     Error **errp)
2071 {
2072     if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
2073         if (pr_datasz != sizeof(uint32_t)) {
2074             error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND");
2075             return false;
2076         }
2077         /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */
2078         info->note_flags = *data;
2079     }
2080     return true;
2081 }
2082 #define ARCH_USE_GNU_PROPERTY 1
2083 
2084 #else
2085 
2086 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz,
2087                                     const uint32_t *data,
2088                                     struct image_info *info,
2089                                     Error **errp)
2090 {
2091     g_assert_not_reached();
2092 }
2093 #define ARCH_USE_GNU_PROPERTY 0
2094 
2095 #endif
2096 
2097 struct exec
2098 {
2099     unsigned int a_info;   /* Use macros N_MAGIC, etc. for access */
2100     unsigned int a_text;   /* length of text, in bytes */
2101     unsigned int a_data;   /* length of data, in bytes */
2102     unsigned int a_bss;    /* length of uninitialized data area, in bytes */
2103     unsigned int a_syms;   /* length of symbol table data in file, in bytes */
2104     unsigned int a_entry;  /* start address */
2105     unsigned int a_trsize; /* length of relocation info for text, in bytes */
2106     unsigned int a_drsize; /* length of relocation info for data, in bytes */
2107 };
2108 
2109 
2110 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
2111 #define OMAGIC 0407
2112 #define NMAGIC 0410
2113 #define ZMAGIC 0413
2114 #define QMAGIC 0314
2115 
2116 #define DLINFO_ITEMS 16
2117 
2118 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
2119 {
2120     memcpy(to, from, n);
2121 }
2122 
2123 #ifdef BSWAP_NEEDED
2124 static void bswap_ehdr(struct elfhdr *ehdr)
2125 {
2126     bswap16s(&ehdr->e_type);            /* Object file type */
2127     bswap16s(&ehdr->e_machine);         /* Architecture */
2128     bswap32s(&ehdr->e_version);         /* Object file version */
2129     bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
2130     bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
2131     bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
2132     bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
2133     bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
2134     bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
2135     bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
2136     bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
2137     bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
2138     bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
2139 }
2140 
2141 static void bswap_phdr(struct elf_phdr *phdr, int phnum)
2142 {
2143     int i;
2144     for (i = 0; i < phnum; ++i, ++phdr) {
2145         bswap32s(&phdr->p_type);        /* Segment type */
2146         bswap32s(&phdr->p_flags);       /* Segment flags */
2147         bswaptls(&phdr->p_offset);      /* Segment file offset */
2148         bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
2149         bswaptls(&phdr->p_paddr);       /* Segment physical address */
2150         bswaptls(&phdr->p_filesz);      /* Segment size in file */
2151         bswaptls(&phdr->p_memsz);       /* Segment size in memory */
2152         bswaptls(&phdr->p_align);       /* Segment alignment */
2153     }
2154 }
2155 
2156 static void bswap_shdr(struct elf_shdr *shdr, int shnum)
2157 {
2158     int i;
2159     for (i = 0; i < shnum; ++i, ++shdr) {
2160         bswap32s(&shdr->sh_name);
2161         bswap32s(&shdr->sh_type);
2162         bswaptls(&shdr->sh_flags);
2163         bswaptls(&shdr->sh_addr);
2164         bswaptls(&shdr->sh_offset);
2165         bswaptls(&shdr->sh_size);
2166         bswap32s(&shdr->sh_link);
2167         bswap32s(&shdr->sh_info);
2168         bswaptls(&shdr->sh_addralign);
2169         bswaptls(&shdr->sh_entsize);
2170     }
2171 }
2172 
2173 static void bswap_sym(struct elf_sym *sym)
2174 {
2175     bswap32s(&sym->st_name);
2176     bswaptls(&sym->st_value);
2177     bswaptls(&sym->st_size);
2178     bswap16s(&sym->st_shndx);
2179 }
2180 
2181 #ifdef TARGET_MIPS
2182 static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
2183 {
2184     bswap16s(&abiflags->version);
2185     bswap32s(&abiflags->ases);
2186     bswap32s(&abiflags->isa_ext);
2187     bswap32s(&abiflags->flags1);
2188     bswap32s(&abiflags->flags2);
2189 }
2190 #endif
2191 #else
2192 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
2193 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
2194 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
2195 static inline void bswap_sym(struct elf_sym *sym) { }
2196 #ifdef TARGET_MIPS
2197 static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
2198 #endif
2199 #endif
2200 
2201 #ifdef USE_ELF_CORE_DUMP
2202 static int elf_core_dump(int, const CPUArchState *);
2203 #endif /* USE_ELF_CORE_DUMP */
2204 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
2205 
2206 /* Verify the portions of EHDR within E_IDENT for the target.
2207    This can be performed before bswapping the entire header.  */
2208 static bool elf_check_ident(struct elfhdr *ehdr)
2209 {
2210     return (ehdr->e_ident[EI_MAG0] == ELFMAG0
2211             && ehdr->e_ident[EI_MAG1] == ELFMAG1
2212             && ehdr->e_ident[EI_MAG2] == ELFMAG2
2213             && ehdr->e_ident[EI_MAG3] == ELFMAG3
2214             && ehdr->e_ident[EI_CLASS] == ELF_CLASS
2215             && ehdr->e_ident[EI_DATA] == ELF_DATA
2216             && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
2217 }
2218 
2219 /* Verify the portions of EHDR outside of E_IDENT for the target.
2220    This has to wait until after bswapping the header.  */
2221 static bool elf_check_ehdr(struct elfhdr *ehdr)
2222 {
2223     return (elf_check_arch(ehdr->e_machine)
2224             && elf_check_abi(ehdr->e_flags)
2225             && ehdr->e_ehsize == sizeof(struct elfhdr)
2226             && ehdr->e_phentsize == sizeof(struct elf_phdr)
2227             && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
2228 }
2229 
2230 /*
2231  * 'copy_elf_strings()' copies argument/environment strings from user
2232  * memory to free pages in kernel memory.  These are in a format ready
2233  * to be put directly into the top of new user memory.
2234  *
2235  */
2236 static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
2237                                   abi_ulong p, abi_ulong stack_limit)
2238 {
2239     char *tmp;
2240     int len, i;
2241     abi_ulong top = p;
2242 
2243     if (!p) {
2244         return 0;       /* bullet-proofing */
2245     }
2246 
2247     if (STACK_GROWS_DOWN) {
2248         int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
2249         for (i = argc - 1; i >= 0; --i) {
2250             tmp = argv[i];
2251             if (!tmp) {
2252                 fprintf(stderr, "VFS: argc is wrong\n");
2253                 exit(-1);
2254             }
2255             len = strlen(tmp) + 1;
2256             tmp += len;
2257 
2258             if (len > (p - stack_limit)) {
2259                 return 0;
2260             }
2261             while (len) {
2262                 int bytes_to_copy = (len > offset) ? offset : len;
2263                 tmp -= bytes_to_copy;
2264                 p -= bytes_to_copy;
2265                 offset -= bytes_to_copy;
2266                 len -= bytes_to_copy;
2267 
2268                 memcpy_fromfs(scratch + offset, tmp, bytes_to_copy);
2269 
2270                 if (offset == 0) {
2271                     memcpy_to_target(p, scratch, top - p);
2272                     top = p;
2273                     offset = TARGET_PAGE_SIZE;
2274                 }
2275             }
2276         }
2277         if (p != top) {
2278             memcpy_to_target(p, scratch + offset, top - p);
2279         }
2280     } else {
2281         int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE);
2282         for (i = 0; i < argc; ++i) {
2283             tmp = argv[i];
2284             if (!tmp) {
2285                 fprintf(stderr, "VFS: argc is wrong\n");
2286                 exit(-1);
2287             }
2288             len = strlen(tmp) + 1;
2289             if (len > (stack_limit - p)) {
2290                 return 0;
2291             }
2292             while (len) {
2293                 int bytes_to_copy = (len > remaining) ? remaining : len;
2294 
2295                 memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy);
2296 
2297                 tmp += bytes_to_copy;
2298                 remaining -= bytes_to_copy;
2299                 p += bytes_to_copy;
2300                 len -= bytes_to_copy;
2301 
2302                 if (remaining == 0) {
2303                     memcpy_to_target(top, scratch, p - top);
2304                     top = p;
2305                     remaining = TARGET_PAGE_SIZE;
2306                 }
2307             }
2308         }
2309         if (p != top) {
2310             memcpy_to_target(top, scratch, p - top);
2311         }
2312     }
2313 
2314     return p;
2315 }
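
/*
 * Worked example for the STACK_GROWS_DOWN case (illustrative values):
 * with TARGET_PAGE_SIZE 4096, p == 0x1000 and argv == { "ls", "-l" },
 * the loop copies the strings last-to-first, so the guest ends up with
 * "-l\0" at 0xffd..0xfff and "ls\0" at 0xffa..0xffc.  The scratch
 * buffer is flushed to the target with memcpy_to_target() each time a
 * page boundary is reached (offset == 0), and once more at the end.
 */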
2316 
2317 /* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of
2318  * argument/environment space. Newer kernels (>2.6.33) allow more,
2319  * dependent on stack size, but guarantee at least 32 pages for
2320  * backwards compatibility.
2321  */
2322 #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE)
2323 
2324 static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
2325                                  struct image_info *info)
2326 {
2327     abi_ulong size, error, guard;
2328     int prot;
2329 
2330     size = guest_stack_size;
2331     if (size < STACK_LOWER_LIMIT) {
2332         size = STACK_LOWER_LIMIT;
2333     }
2334 
2335     if (STACK_GROWS_DOWN) {
2336         guard = TARGET_PAGE_SIZE;
2337         if (guard < qemu_real_host_page_size()) {
2338             guard = qemu_real_host_page_size();
2339         }
2340     } else {
2341         /* no guard page for hppa target where stack grows upwards. */
2342         guard = 0;
2343     }
2344 
2345     prot = PROT_READ | PROT_WRITE;
2346     if (info->exec_stack) {
2347         prot |= PROT_EXEC;
2348     }
2349     error = target_mmap(0, size + guard, prot,
2350                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2351     if (error == -1) {
2352         perror("mmap stack");
2353         exit(-1);
2354     }
2355 
2356     /* We reserve one extra page at the top of the stack as guard.  */
2357     if (STACK_GROWS_DOWN) {
2358         target_mprotect(error, guard, PROT_NONE);
2359         info->stack_limit = error + guard;
2360         return info->stack_limit + size - sizeof(void *);
2361     } else {
2362         info->stack_limit = error + size;
2363         return error;
2364     }
2365 }
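
/*
 * Illustrative layout for a down-growing stack, assuming the default
 * 8 MiB guest stack and a one-page guard:
 *
 *   error .. error + guard - 1    PROT_NONE guard page
 *   error + guard                 info->stack_limit (lowest usable byte)
 *   ... size bytes of stack ...
 *   returned value                initial sp, one pointer-slot below the top
 */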
2366 
2367 /**
2368  * zero_bss:
2369  *
2370  * Map and zero the bss.  We need to explicitly zero any fractional pages
2371  * after the data section (i.e. bss).  Return false on mapping failure.
2372  */
2373 static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss,
2374                      int prot, Error **errp)
2375 {
2376     abi_ulong align_bss;
2377 
2378     /* We only expect writable bss; the code segment shouldn't need this. */
2379     if (!(prot & PROT_WRITE)) {
2380         error_setg(errp, "PT_LOAD with non-writable bss");
2381         return false;
2382     }
2383 
2384     align_bss = TARGET_PAGE_ALIGN(start_bss);
2385     end_bss = TARGET_PAGE_ALIGN(end_bss);
2386 
2387     if (start_bss < align_bss) {
2388         int flags = page_get_flags(start_bss);
2389 
2390         if (!(flags & PAGE_BITS)) {
2391             /*
2392              * The whole address space of the executable was reserved
2393              * at the start, therefore all pages will be VALID.
2394              * But assuming there are no PROT_NONE PT_LOAD segments,
2395              * a PROT_NONE page means no data, only bss, and we can
2396              * simply extend the new anon mapping back to the start
2397              * of the page of bss.
2398              */
2399             align_bss -= TARGET_PAGE_SIZE;
2400         } else {
2401             /*
2402              * The start of the bss shares a page with something.
2403              * The only thing that we expect is the data section,
2404              * which would already be marked writable.
2405              * Overlapping the RX code segment seems malformed.
2406              */
2407             if (!(flags & PAGE_WRITE)) {
2408                 error_setg(errp, "PT_LOAD with bss overlapping "
2409                            "non-writable page");
2410                 return false;
2411             }
2412 
2413             /* The page is already mapped and writable. */
2414             memset(g2h_untagged(start_bss), 0, align_bss - start_bss);
2415         }
2416     }
2417 
2418     if (align_bss < end_bss &&
2419         target_mmap(align_bss, end_bss - align_bss, prot,
2420                     MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
2421         error_setg_errno(errp, errno, "Error mapping bss");
2422         return false;
2423     }
2424     return true;
2425 }
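
/*
 * Worked example (illustrative addresses, 4 KiB target pages): for
 * start_bss == 0x11234 and end_bss == 0x13000, the partial data page
 * 0x11234..0x11fff is cleared with memset() because it is already
 * mapped writable, while 0x12000..0x12fff is covered by a fresh
 * anonymous mapping, which the kernel zero-fills for us.
 */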
2426 
2427 #if defined(TARGET_ARM)
2428 static int elf_is_fdpic(struct elfhdr *exec)
2429 {
2430     return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
2431 }
2432 #elif defined(TARGET_XTENSA)
2433 static int elf_is_fdpic(struct elfhdr *exec)
2434 {
2435     return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC;
2436 }
2437 #else
2438 /* Default implementation, always false.  */
2439 static int elf_is_fdpic(struct elfhdr *exec)
2440 {
2441     return 0;
2442 }
2443 #endif
2444 
2445 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
2446 {
2447     uint16_t n;
2448     struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
2449 
2450     /* elf32_fdpic_loadseg */
2451     n = info->nsegs;
2452     while (n--) {
2453         sp -= 12;
2454         put_user_u32(loadsegs[n].addr, sp+0);
2455         put_user_u32(loadsegs[n].p_vaddr, sp+4);
2456         put_user_u32(loadsegs[n].p_memsz, sp+8);
2457     }
2458 
2459     /* elf32_fdpic_loadmap */
2460     sp -= 4;
2461     put_user_u16(0, sp+0); /* version */
2462     put_user_u16(info->nsegs, sp+2); /* nsegs */
2463 
2464     info->personality = PER_LINUX_FDPIC;
2465     info->loadmap_addr = sp;
2466 
2467     return sp;
2468 }
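
/*
 * Illustrative result for nsegs == 2 (the loadmap header ends up
 * lowest, because the segment descriptors are pushed first):
 *
 *   sp + 0   version (0), nsegs (2)      struct elf32_fdpic_loadmap
 *   sp + 4   addr, p_vaddr, p_memsz      loadseg[0]
 *   sp + 16  addr, p_vaddr, p_memsz      loadseg[1]
 */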
2469 
2470 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
2471                                    struct elfhdr *exec,
2472                                    struct image_info *info,
2473                                    struct image_info *interp_info)
2474 {
2475     abi_ulong sp;
2476     abi_ulong u_argc, u_argv, u_envp, u_auxv;
2477     int size;
2478     int i;
2479     abi_ulong u_rand_bytes;
2480     uint8_t k_rand_bytes[16];
2481     abi_ulong u_platform, u_base_platform;
2482     const char *k_platform, *k_base_platform;
2483     const int n = sizeof(elf_addr_t);
2484 
2485     sp = p;
2486 
2487     /* Needs to be before we load the env/argc/... */
2488     if (elf_is_fdpic(exec)) {
2489         /* Need 4 byte alignment for these structs */
2490         sp &= ~3;
2491         sp = loader_build_fdpic_loadmap(info, sp);
2492         info->other_info = interp_info;
2493         if (interp_info) {
2494             interp_info->other_info = info;
2495             sp = loader_build_fdpic_loadmap(interp_info, sp);
2496             info->interpreter_loadmap_addr = interp_info->loadmap_addr;
2497             info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr;
2498         } else {
2499             info->interpreter_loadmap_addr = 0;
2500             info->interpreter_pt_dynamic_addr = 0;
2501         }
2502     }
2503 
2504     u_base_platform = 0;
2505     k_base_platform = ELF_BASE_PLATFORM;
2506     if (k_base_platform) {
2507         size_t len = strlen(k_base_platform) + 1;
2508         if (STACK_GROWS_DOWN) {
2509             sp -= (len + n - 1) & ~(n - 1);
2510             u_base_platform = sp;
2511             /* FIXME - check return value of memcpy_to_target() for failure */
2512             memcpy_to_target(sp, k_base_platform, len);
2513         } else {
2514             memcpy_to_target(sp, k_base_platform, len);
2515             u_base_platform = sp;
2516             sp += len + 1;
2517         }
2518     }
2519 
2520     u_platform = 0;
2521     k_platform = ELF_PLATFORM;
2522     if (k_platform) {
2523         size_t len = strlen(k_platform) + 1;
2524         if (STACK_GROWS_DOWN) {
2525             sp -= (len + n - 1) & ~(n - 1);
2526             u_platform = sp;
2527             /* FIXME - check return value of memcpy_to_target() for failure */
2528             memcpy_to_target(sp, k_platform, len);
2529         } else {
2530             memcpy_to_target(sp, k_platform, len);
2531             u_platform = sp;
2532             sp += len + 1;
2533         }
2534     }
2535 
2536     /* Provide 16 byte alignment for the PRNG, and basic alignment for
2537      * the argv and envp pointers.
2538      */
2539     if (STACK_GROWS_DOWN) {
2540         sp = QEMU_ALIGN_DOWN(sp, 16);
2541     } else {
2542         sp = QEMU_ALIGN_UP(sp, 16);
2543     }
2544 
2545     /*
2546      * Generate 16 random bytes for userspace PRNG seeding.
2547      */
2548     qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes));
2549     if (STACK_GROWS_DOWN) {
2550         sp -= 16;
2551         u_rand_bytes = sp;
2552         /* FIXME - check return value of memcpy_to_target() for failure */
2553         memcpy_to_target(sp, k_rand_bytes, 16);
2554     } else {
2555         memcpy_to_target(sp, k_rand_bytes, 16);
2556         u_rand_bytes = sp;
2557         sp += 16;
2558     }
2559 
2560     size = (DLINFO_ITEMS + 1) * 2;
2561     if (k_base_platform)
2562         size += 2;
2563     if (k_platform)
2564         size += 2;
2565 #ifdef DLINFO_ARCH_ITEMS
2566     size += DLINFO_ARCH_ITEMS * 2;
2567 #endif
2568 #ifdef ELF_HWCAP2
2569     size += 2;
2570 #endif
2571     info->auxv_len = size * n;
2572 
2573     size += envc + argc + 2;
2574     size += 1;  /* argc itself */
2575     size *= n;
2576 
2577     /* Allocate space and finalize stack alignment for entry now.  */
2578     if (STACK_GROWS_DOWN) {
2579         u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT);
2580         sp = u_argc;
2581     } else {
2582         u_argc = sp;
2583         sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT);
2584     }
2585 
2586     u_argv = u_argc + n;
2587     u_envp = u_argv + (argc + 1) * n;
2588     u_auxv = u_envp + (envc + 1) * n;
2589     info->saved_auxv = u_auxv;
2590     info->argc = argc;
2591     info->envc = envc;
2592     info->argv = u_argv;
2593     info->envp = u_envp;
2594 
2595     /* This is correct because Linux defines
2596      * elf_addr_t as Elf32_Off / Elf64_Off
2597      */
2598 #define NEW_AUX_ENT(id, val) do {               \
2599         put_user_ual(id, u_auxv);  u_auxv += n; \
2600         put_user_ual(val, u_auxv); u_auxv += n; \
2601     } while (0)
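
    /*
     * Each invocation stores one auxv entry as two target words.  For
     * example, NEW_AUX_ENT(AT_PHNUM, 3) on a 32-bit target writes the
     * two 4-byte values 5 (AT_PHNUM) and 3 at u_auxv and advances
     * u_auxv by 2 * n.
     */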
2602 
2603 #ifdef ARCH_DLINFO
2604     /*
2605      * ARCH_DLINFO must come first so platform specific code can enforce
2606      * special alignment requirements on the AUXV if necessary (eg. PPC).
2607      */
2608     ARCH_DLINFO;
2609 #endif
2610     /* There must be exactly DLINFO_ITEMS entries here, or the assert
2611      * on info->auxv_len will trigger.
2612      */
2613     NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
2614     NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
2615     NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
2616     if ((info->alignment & ~qemu_host_page_mask) != 0) {
2617         /* Target doesn't support host page size alignment */
2618         NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
2619     } else {
2620         NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE,
2621                                                qemu_host_page_size)));
2622     }
2623     NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
2624     NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
2625     NEW_AUX_ENT(AT_ENTRY, info->entry);
2626     NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
2627     NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
2628     NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
2629     NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
2630     NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
2631     NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
2632     NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
2633     NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE));
2634     NEW_AUX_ENT(AT_EXECFN, info->file_string);
2635 
2636 #ifdef ELF_HWCAP2
2637     NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
2638 #endif
2639 
2640     if (u_base_platform) {
2641         NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform);
2642     }
2643     if (u_platform) {
2644         NEW_AUX_ENT(AT_PLATFORM, u_platform);
2645     }
2646     NEW_AUX_ENT (AT_NULL, 0);
2647 #undef NEW_AUX_ENT
2648 
2649     /* Check that our initial calculation of the auxv length matches how much
2650      * we actually put into it.
2651      */
2652     assert(info->auxv_len == u_auxv - info->saved_auxv);
2653 
2654     put_user_ual(argc, u_argc);
2655 
2656     p = info->arg_strings;
2657     for (i = 0; i < argc; ++i) {
2658         put_user_ual(p, u_argv);
2659         u_argv += n;
2660         p += target_strlen(p) + 1;
2661     }
2662     put_user_ual(0, u_argv);
2663 
2664     p = info->env_strings;
2665     for (i = 0; i < envc; ++i) {
2666         put_user_ual(p, u_envp);
2667         u_envp += n;
2668         p += target_strlen(p) + 1;
2669     }
2670     put_user_ual(0, u_envp);
2671 
2672     return sp;
2673 }
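
/*
 * For a down-growing stack, the sp returned above points at a block
 * laid out as (lowest address first):
 *
 *   argc
 *   argv[0] .. argv[argc - 1], NULL
 *   envp[0] .. envp[envc - 1], NULL
 *   auxv pairs, terminated by AT_NULL
 *
 * with the random bytes, the platform strings and the
 * argument/environment strings themselves at higher addresses.
 */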
2674 
2675 #if defined(HI_COMMPAGE)
2676 #define LO_COMMPAGE -1
2677 #elif defined(LO_COMMPAGE)
2678 #define HI_COMMPAGE 0
2679 #else
2680 #define HI_COMMPAGE 0
2681 #define LO_COMMPAGE -1
2682 #ifndef INIT_GUEST_COMMPAGE
2683 #define init_guest_commpage() true
2684 #endif
2685 #endif
2686 
2687 /**
2688  * pgb_try_mmap:
2689  * @addr: host start address
2690  * @addr_last: host last address
2691  * @keep: do not unmap the probe region
2692  *
2693  * Return 1 if [@addr, @addr_last] is not mapped in the host,
2694  * return 0 if it is not available to map, and -1 on mmap error.
2695  * If @keep, the region is left mapped on success, otherwise unmapped.
2696  */
2697 static int pgb_try_mmap(uintptr_t addr, uintptr_t addr_last, bool keep)
2698 {
2699     size_t size = addr_last - addr + 1;
2700     void *p = mmap((void *)addr, size, PROT_NONE,
2701                    MAP_ANONYMOUS | MAP_PRIVATE |
2702                    MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0);
2703     int ret;
2704 
2705     if (p == MAP_FAILED) {
2706         return errno == EEXIST ? 0 : -1;
2707     }
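
    /*
     * Hosts that predate MAP_FIXED_NOREPLACE (Linux < 4.17) ignore the
     * unknown flag and treat the address as a plain hint, so the
     * mapping can succeed at a different address.  Treat that case
     * (p != addr) as the requested region being unavailable.
     */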
2708     ret = p == (void *)addr;
2709     if (!keep || !ret) {
2710         munmap(p, size);
2711     }
2712     return ret;
2713 }
2714 
2715 /**
2716  * pgb_try_mmap_skip_brk:
2717  * @addr: host start address
2718  * @addr_last: host last address
2719  * @brk: host brk
 * @keep: do not unmap the probe region
2720  *
2721  * Like pgb_try_mmap, but additionally reserve some memory following brk.
2722  */
2723 static int pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t addr_last,
2724                                  uintptr_t brk, bool keep)
2725 {
2726     uintptr_t brk_last = brk + 16 * MiB - 1;
2727 
2728     /* Do not map anything close to the host brk. */
2729     if (addr <= brk_last && brk <= addr_last) {
2730         return 0;
2731     }
2732     return pgb_try_mmap(addr, addr_last, keep);
2733 }
2734 
2735 /**
2736  * pgb_try_mmap_set:
2737  * @ga: set of guest addrs
2738  * @base: guest_base
2739  * @brk: host brk
2740  *
2741  * Return true if all @ga can be mapped by the host at @base.
2742  * On success, retain the mapping at index 0 for reserved_va.
2743  */
2744 
2745 typedef struct PGBAddrs {
2746     uintptr_t bounds[3][2]; /* start/last pairs */
2747     int nbounds;
2748 } PGBAddrs;
2749 
2750 static bool pgb_try_mmap_set(const PGBAddrs *ga, uintptr_t base, uintptr_t brk)
2751 {
2752     for (int i = ga->nbounds - 1; i >= 0; --i) {
2753         if (pgb_try_mmap_skip_brk(ga->bounds[i][0] + base,
2754                                   ga->bounds[i][1] + base,
2755                                   brk, i == 0 && reserved_va) <= 0) {
2756             return false;
2757         }
2758     }
2759     return true;
2760 }
2761 
2762 /**
2763  * pgb_addr_set:
2764  * @ga: output set of guest addrs
2765  * @guest_loaddr: guest image low address
2766  * @guest_hiaddr: guest image high address
2767  * @try_identity: create for identity mapping
2768  *
2769  * Fill in @ga with the image, COMMPAGE and NULL page.
2770  */
2771 static bool pgb_addr_set(PGBAddrs *ga, abi_ulong guest_loaddr,
2772                          abi_ulong guest_hiaddr, bool try_identity)
2773 {
2774     int n;
2775 
2776     /*
2777      * With a low commpage, or a guest mapped very low,
2778      * we may not be able to use the identity map.
2779      */
2780     if (try_identity) {
2781         if (LO_COMMPAGE != -1 && LO_COMMPAGE < mmap_min_addr) {
2782             return false;
2783         }
2784         if (guest_loaddr != 0 && guest_loaddr < mmap_min_addr) {
2785             return false;
2786         }
2787     }
2788 
2789     memset(ga, 0, sizeof(*ga));
2790     n = 0;
2791 
2792     if (reserved_va) {
2793         ga->bounds[n][0] = try_identity ? mmap_min_addr : 0;
2794         ga->bounds[n][1] = reserved_va;
2795         n++;
2796         /* LO_COMMPAGE and NULL handled by reserving from 0. */
2797     } else {
2798         /* Add any LO_COMMPAGE or NULL page. */
2799         if (LO_COMMPAGE != -1) {
2800             ga->bounds[n][0] = 0;
2801             ga->bounds[n][1] = LO_COMMPAGE + TARGET_PAGE_SIZE - 1;
2802             n++;
2803         } else if (!try_identity) {
2804             ga->bounds[n][0] = 0;
2805             ga->bounds[n][1] = TARGET_PAGE_SIZE - 1;
2806             n++;
2807         }
2808 
2809         /* Add the guest image for ET_EXEC. */
2810         if (guest_loaddr) {
2811             ga->bounds[n][0] = guest_loaddr;
2812             ga->bounds[n][1] = guest_hiaddr;
2813             n++;
2814         }
2815     }
2816 
2817     /*
2818      * Temporarily disable
2819      *   "comparison is always false due to limited range of data type"
2820      * due to a comparison between an unsigned value and (possibly) 0.
2821      */
2822 #pragma GCC diagnostic push
2823 #pragma GCC diagnostic ignored "-Wtype-limits"
2824 
2825     /* Add any HI_COMMPAGE not covered by reserved_va. */
2826     if (reserved_va < HI_COMMPAGE) {
2827         ga->bounds[n][0] = HI_COMMPAGE & qemu_host_page_mask;
2828         ga->bounds[n][1] = HI_COMMPAGE + TARGET_PAGE_SIZE - 1;
2829         n++;
2830     }
2831 
2832 #pragma GCC diagnostic pop
2833 
2834     ga->nbounds = n;
2835     return true;
2836 }
2837 
2838 static void pgb_fail_in_use(const char *image_name)
2839 {
2840     error_report("%s: requires virtual address space that is in use "
2841                  "(omit the -B option or choose a different value)",
2842                  image_name);
2843     exit(EXIT_FAILURE);
2844 }
2845 
2846 static void pgb_fixed(const char *image_name, uintptr_t guest_loaddr,
2847                       uintptr_t guest_hiaddr, uintptr_t align)
2848 {
2849     PGBAddrs ga;
2850     uintptr_t brk = (uintptr_t)sbrk(0);
2851 
2852     if (!QEMU_IS_ALIGNED(guest_base, align)) {
2853         fprintf(stderr, "Requested guest base %p does not satisfy "
2854                 "host minimum alignment (0x%" PRIxPTR ")\n",
2855                 (void *)guest_base, align);
2856         exit(EXIT_FAILURE);
2857     }
2858 
2859     if (!pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, !guest_base)
2860         || !pgb_try_mmap_set(&ga, guest_base, brk)) {
2861         pgb_fail_in_use(image_name);
2862     }
2863 }
2864 
2865 /**
2866  * pgb_find_fallback:
2867  *
2868  * This is a fallback method for finding holes in the host address space
2869  * if we don't have the benefit of being able to access /proc/self/maps.
2870  * It can potentially take a very long time as we can only dumbly iterate
2871  * up the host address space seeing if the allocation would work.
2872  */
2873 static uintptr_t pgb_find_fallback(const PGBAddrs *ga, uintptr_t align,
2874                                    uintptr_t brk)
2875 {
2876     /* TODO: come up with a better estimate of how much to skip. */
2877     uintptr_t skip = sizeof(uintptr_t) == 4 ? MiB : GiB;
2878 
2879     for (uintptr_t base = skip; ; base += skip) {
2880         base = ROUND_UP(base, align);
2881         if (pgb_try_mmap_set(ga, base, brk)) {
2882             return base;
2883         }
2884         if (base >= -skip) {
2885             return -1;
2886         }
2887     }
2888 }
2889 
2890 static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base,
2891                                IntervalTreeRoot *root)
2892 {
2893     for (int i = ga->nbounds - 1; i >= 0; --i) {
2894         uintptr_t s = base + ga->bounds[i][0];
2895         uintptr_t l = base + ga->bounds[i][1];
2896         IntervalTreeNode *n;
2897 
2898         if (l < s) {
2899             /* Wraparound. Skip to advance S to mmap_min_addr. */
2900             return mmap_min_addr - s;
2901         }
2902 
2903         n = interval_tree_iter_first(root, s, l);
2904         if (n != NULL) {
2905             /* Conflict.  Skip to advance S to LAST + 1. */
2906             return n->last - s + 1;
2907         }
2908     }
2909     return 0;  /* success */
2910 }
2911 
2912 static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root,
2913                                 uintptr_t align, uintptr_t brk)
2914 {
2915     uintptr_t last = mmap_min_addr;
2916     uintptr_t base, skip;
2917 
2918     while (true) {
2919         base = ROUND_UP(last, align);
2920         if (base < last) {
2921             return -1;
2922         }
2923 
2924         skip = pgb_try_itree(ga, base, root);
2925         if (skip == 0) {
2926             break;
2927         }
2928 
2929         last = base + skip;
2930         if (last < base) {
2931             return -1;
2932         }
2933     }
2934 
2935     /*
2936      * We've chosen 'base' based on holes in the interval tree,
2937      * but we don't yet know if it is a valid host address.
2938      * Because it is the first matching hole, if the host addresses
2939      * are invalid we know there are no further matches.
2940      */
2941     return pgb_try_mmap_set(ga, base, brk) ? base : -1;
2942 }
2943 
2944 static void pgb_dynamic(const char *image_name, uintptr_t guest_loaddr,
2945                         uintptr_t guest_hiaddr, uintptr_t align)
2946 {
2947     IntervalTreeRoot *root;
2948     uintptr_t brk, ret;
2949     PGBAddrs ga;
2950 
2951     assert(QEMU_IS_ALIGNED(guest_loaddr, align));
2952 
2953     /* Try the identity map first. */
2954     if (pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, true)) {
2955         brk = (uintptr_t)sbrk(0);
2956         if (pgb_try_mmap_set(&ga, 0, brk)) {
2957             guest_base = 0;
2958             return;
2959         }
2960     }
2961 
2962     /*
2963      * Rebuild the address set for non-identity map.
2964      * This differs in the mapping of the guest NULL page.
2965      */
2966     pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, false);
2967 
2968     root = read_self_maps();
2969 
2970     /* Read brk after we've read the maps, which will malloc. */
2971     brk = (uintptr_t)sbrk(0);
2972 
2973     if (!root) {
2974         ret = pgb_find_fallback(&ga, align, brk);
2975     } else {
2976         /*
2977          * Reserve the area close to the host brk.
2978          * This will be freed with the rest of the tree.
2979          */
2980         IntervalTreeNode *b = g_new0(IntervalTreeNode, 1);
2981         b->start = brk;
2982         b->last = brk + 16 * MiB - 1;
2983         interval_tree_insert(b, root);
2984 
2985         ret = pgb_find_itree(&ga, root, align, brk);
2986         free_self_maps(root);
2987     }
2988 
2989     if (ret == -1) {
2990         int w = TARGET_LONG_BITS / 4;
2991 
2992         error_report("%s: Unable to find a guest_base to satisfy all "
2993                      "guest address mapping requirements", image_name);
2994 
2995         for (int i = 0; i < ga.nbounds; ++i) {
2996             error_printf("  %0*" PRIx64 "-%0*" PRIx64 "\n",
2997                          w, (uint64_t)ga.bounds[i][0],
2998                          w, (uint64_t)ga.bounds[i][1]);
2999         }
3000         exit(EXIT_FAILURE);
3001     }
3002     guest_base = ret;
3003 }
3004 
3005 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
3006                       abi_ulong guest_hiaddr)
3007 {
3008     /* In order to use host shmat, we must be able to honor SHMLBA.  */
3009     uintptr_t align = MAX(SHMLBA, qemu_host_page_size);
3010 
3011     /* Sanity check the guest binary. */
3012     if (reserved_va) {
3013         if (guest_hiaddr > reserved_va) {
3014             error_report("%s: requires more than reserved virtual "
3015                          "address space (0x%" PRIx64 " > 0x%lx)",
3016                          image_name, (uint64_t)guest_hiaddr, reserved_va);
3017             exit(EXIT_FAILURE);
3018         }
3019     } else {
3020         if (guest_hiaddr != (uintptr_t)guest_hiaddr) {
3021             error_report("%s: requires more virtual address space "
3022                          "than the host can provide (0x%" PRIx64 ")",
3023                          image_name, (uint64_t)guest_hiaddr + 1);
3024             exit(EXIT_FAILURE);
3025         }
3026     }
3027 
3028     if (have_guest_base) {
3029         pgb_fixed(image_name, guest_loaddr, guest_hiaddr, align);
3030     } else {
3031         pgb_dynamic(image_name, guest_loaddr, guest_hiaddr, align);
3032     }
3033 
3034     /* Reserve and initialize the commpage. */
3035     if (!init_guest_commpage()) {
3036         /* We have already probed for the commpage being free. */
3037         g_assert_not_reached();
3038     }
3039 
3040     assert(QEMU_IS_ALIGNED(guest_base, align));
3041     qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space "
3042                   "@ 0x%" PRIx64 "\n", (uint64_t)guest_base);
3043 }
3044 
3045 enum {
3046     /* The string "GNU\0" as a magic number. */
3047     GNU0_MAGIC = const_le32('G' | 'N' << 8 | 'U' << 16),
3048     NOTE_DATA_SZ = 1 * KiB,
3049     NOTE_NAME_SZ = 4,
3050     ELF_GNU_PROPERTY_ALIGN = ELF_CLASS == ELFCLASS32 ? 4 : 8,
3051 };
3052 
3053 /*
3054  * Process a single gnu_property entry.
3055  * Return false for error.
3056  */
3057 static bool parse_elf_property(const uint32_t *data, int *off, int datasz,
3058                                struct image_info *info, bool have_prev_type,
3059                                uint32_t *prev_type, Error **errp)
3060 {
3061     uint32_t pr_type, pr_datasz, step;
3062 
3063     if (*off > datasz || !QEMU_IS_ALIGNED(*off, ELF_GNU_PROPERTY_ALIGN)) {
3064         goto error_data;
3065     }
3066     datasz -= *off;
3067     data += *off / sizeof(uint32_t);
3068 
3069     if (datasz < 2 * sizeof(uint32_t)) {
3070         goto error_data;
3071     }
3072     pr_type = data[0];
3073     pr_datasz = data[1];
3074     data += 2;
3075     datasz -= 2 * sizeof(uint32_t);
3076     step = ROUND_UP(pr_datasz, ELF_GNU_PROPERTY_ALIGN);
3077     if (step > datasz) {
3078         goto error_data;
3079     }
3080 
3081     /* Properties are supposed to be unique and sorted on pr_type. */
3082     if (have_prev_type && pr_type <= *prev_type) {
3083         if (pr_type == *prev_type) {
3084             error_setg(errp, "Duplicate property in PT_GNU_PROPERTY");
3085         } else {
3086             error_setg(errp, "Unsorted property in PT_GNU_PROPERTY");
3087         }
3088         return false;
3089     }
3090     *prev_type = pr_type;
3091 
3092     if (!arch_parse_elf_property(pr_type, pr_datasz, data, info, errp)) {
3093         return false;
3094     }
3095 
3096     *off += 2 * sizeof(uint32_t) + step;
3097     return true;
3098 
3099  error_data:
3100     error_setg(errp, "Ill-formed property in PT_GNU_PROPERTY");
3101     return false;
3102 }
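
/*
 * Illustrative layout of one property inside the note's descriptor,
 * here GNU_PROPERTY_AARCH64_FEATURE_1_AND on a 64-bit target (so
 * ELF_GNU_PROPERTY_ALIGN == 8):
 *
 *   uint32_t pr_type;    GNU_PROPERTY_AARCH64_FEATURE_1_AND
 *   uint32_t pr_datasz;  4
 *   uint32_t data[1];    feature bitmask (e.g. the BTI bit)
 *   uint32_t pad;        ROUND_UP(4, 8) leaves 4 bytes of padding
 */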
3103 
3104 /* Process NT_GNU_PROPERTY_TYPE_0. */
3105 static bool parse_elf_properties(int image_fd,
3106                                  struct image_info *info,
3107                                  const struct elf_phdr *phdr,
3108                                  char bprm_buf[BPRM_BUF_SIZE],
3109                                  Error **errp)
3110 {
3111     union {
3112         struct elf_note nhdr;
3113         uint32_t data[NOTE_DATA_SZ / sizeof(uint32_t)];
3114     } note;
3115 
3116     int n, off, datasz;
3117     bool have_prev_type;
3118     uint32_t prev_type;
3119 
3120     /* Unless the arch requires properties, ignore them. */
3121     if (!ARCH_USE_GNU_PROPERTY) {
3122         return true;
3123     }
3124 
3125     /* If the properties are crazy large, that's too bad. */
3126     n = phdr->p_filesz;
3127     if (n > sizeof(note)) {
3128         error_setg(errp, "PT_GNU_PROPERTY too large");
3129         return false;
3130     }
3131     if (n < sizeof(note.nhdr)) {
3132         error_setg(errp, "PT_GNU_PROPERTY too small");
3133         return false;
3134     }
3135 
3136     if (phdr->p_offset + n <= BPRM_BUF_SIZE) {
3137         memcpy(&note, bprm_buf + phdr->p_offset, n);
3138     } else {
3139         ssize_t len = pread(image_fd, &note, n, phdr->p_offset);
3140         if (len != n) {
3141             error_setg_errno(errp, errno, "Error reading file header");
3142             return false;
3143         }
3144     }
3145 
3146     /*
3147      * The contents of a valid PT_GNU_PROPERTY are a sequence
3148      * of uint32_t -- swap them all now.
3149      */
3150 #ifdef BSWAP_NEEDED
3151     for (int i = 0; i < n / 4; i++) {
3152         bswap32s(note.data + i);
3153     }
3154 #endif
3155 
3156     /*
3157      * Note that nhdr is 3 words, and that the "name" described by namesz
3158      * immediately follows nhdr and is thus at the 4th word.  Further, all
3159      * of the inputs to the kernel's round_up are multiples of 4.
3160      */
3161     if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
3162         note.nhdr.n_namesz != NOTE_NAME_SZ ||
3163         note.data[3] != GNU0_MAGIC) {
3164         error_setg(errp, "Invalid note in PT_GNU_PROPERTY");
3165         return false;
3166     }
3167     off = sizeof(note.nhdr) + NOTE_NAME_SZ;
3168 
3169     datasz = note.nhdr.n_descsz + off;
3170     if (datasz > n) {
3171         error_setg(errp, "Invalid note size in PT_GNU_PROPERTY");
3172         return false;
3173     }
3174 
3175     have_prev_type = false;
3176     prev_type = 0;
3177     while (1) {
3178         if (off == datasz) {
3179             return true;  /* end, exit ok */
3180         }
3181         if (!parse_elf_property(note.data, &off, datasz, info,
3182                                 have_prev_type, &prev_type, errp)) {
3183             return false;
3184         }
3185         have_prev_type = true;
3186     }
3187 }
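/*
 * Illustrative only: the uint32_t stream that parse_elf_properties()
 * accepts, shown for a single 4-byte property under ELFCLASS32
 * (ELF_GNU_PROPERTY_ALIGN == 4).  The property type 0xc0000000 is a
 * made-up example value, not a real GNU_PROPERTY_* constant.
 */
#if 0
static const uint32_t example_gnu_property_note[] = {
    4,                        /* n_namesz: "GNU" plus NUL terminator */
    12,                       /* n_descsz: one 12-byte property entry */
    NT_GNU_PROPERTY_TYPE_0,   /* n_type */
    GNU0_MAGIC,               /* the note name, "GNU\0" */
    0xc0000000,               /* pr_type (hypothetical) */
    4,                        /* pr_datasz */
    1,                        /* 4 data bytes; already a multiple of the alignment */
};
#endif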
3188 
3189 /* Load an ELF image into the address space.
3190 
3191    IMAGE_NAME is the filename of the image, to use in error messages.
3192    IMAGE_FD is the open file descriptor for the image.
3193 
3194    BPRM_BUF is a copy of the beginning of the file; this of course
3195    contains the elf file header at offset 0.  It is assumed that this
3196    buffer is sufficiently aligned to present no problems to the host
3197    in accessing data at aligned offsets within the buffer.
3198 
3199    On return: INFO values will be filled in, as necessary or available.  */
3200 
3201 static void load_elf_image(const char *image_name, int image_fd,
3202                            struct image_info *info, char **pinterp_name,
3203                            char bprm_buf[BPRM_BUF_SIZE])
3204 {
3205     struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
3206     struct elf_phdr *phdr;
3207     abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
3208     int i, retval, prot_exec;
3209     Error *err = NULL;
3210 
3211     /* First of all, some simple consistency checks */
3212     if (!elf_check_ident(ehdr)) {
3213         error_setg(&err, "Invalid ELF image for this architecture");
3214         goto exit_errmsg;
3215     }
3216     bswap_ehdr(ehdr);
3217     if (!elf_check_ehdr(ehdr)) {
3218         error_setg(&err, "Invalid ELF image for this architecture");
3219         goto exit_errmsg;
3220     }
3221 
3222     i = ehdr->e_phnum * sizeof(struct elf_phdr);
3223     if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
3224         phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
3225     } else {
3226         phdr = (struct elf_phdr *) alloca(i);
3227         retval = pread(image_fd, phdr, i, ehdr->e_phoff);
3228         if (retval != i) {
3229             goto exit_read;
3230         }
3231     }
3232     bswap_phdr(phdr, ehdr->e_phnum);
3233 
3234     info->nsegs = 0;
3235     info->pt_dynamic_addr = 0;
3236 
3237     mmap_lock();
3238 
3239     /*
3240      * Find the maximum size of the image and allocate an appropriate
3241      * amount of memory to handle that.  Locate the interpreter, if any.
3242      */
3243     loaddr = -1, hiaddr = 0;
3244     info->alignment = 0;
3245     info->exec_stack = EXSTACK_DEFAULT;
3246     for (i = 0; i < ehdr->e_phnum; ++i) {
3247         struct elf_phdr *eppnt = phdr + i;
3248         if (eppnt->p_type == PT_LOAD) {
3249             abi_ulong a = eppnt->p_vaddr - eppnt->p_offset;
3250             if (a < loaddr) {
3251                 loaddr = a;
3252             }
3253             a = eppnt->p_vaddr + eppnt->p_memsz - 1;
3254             if (a > hiaddr) {
3255                 hiaddr = a;
3256             }
3257             ++info->nsegs;
3258             info->alignment |= eppnt->p_align;
3259         } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
3260             g_autofree char *interp_name = NULL;
3261 
3262             if (*pinterp_name) {
3263                 error_setg(&err, "Multiple PT_INTERP entries");
3264                 goto exit_errmsg;
3265             }
3266 
3267             interp_name = g_malloc(eppnt->p_filesz);
3268 
3269             if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
3270                 memcpy(interp_name, bprm_buf + eppnt->p_offset,
3271                        eppnt->p_filesz);
3272             } else {
3273                 retval = pread(image_fd, interp_name, eppnt->p_filesz,
3274                                eppnt->p_offset);
3275                 if (retval != eppnt->p_filesz) {
3276                     goto exit_read;
3277                 }
3278             }
3279             if (interp_name[eppnt->p_filesz - 1] != 0) {
3280                 error_setg(&err, "Invalid PT_INTERP entry");
3281                 goto exit_errmsg;
3282             }
3283             *pinterp_name = g_steal_pointer(&interp_name);
3284         } else if (eppnt->p_type == PT_GNU_PROPERTY) {
3285             if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) {
3286                 goto exit_errmsg;
3287             }
3288         } else if (eppnt->p_type == PT_GNU_STACK) {
3289             info->exec_stack = eppnt->p_flags & PF_X;
3290         }
3291     }
3292 
3293     load_addr = loaddr;
3294 
3295     if (pinterp_name != NULL) {
3296         if (ehdr->e_type == ET_EXEC) {
3297             /*
3298              * Make sure that the low address does not conflict with
3299              * MMAP_MIN_ADDR or the QEMU application itself.
3300              */
3301             probe_guest_base(image_name, loaddr, hiaddr);
3302         } else {
3303             abi_ulong align;
3304 
3305             /*
3306              * The binary is dynamic, but we still need to
3307              * select guest_base.  In this case we pass a size.
3308              */
3309             probe_guest_base(image_name, 0, hiaddr - loaddr);
3310 
3311             /*
3312              * Avoid collision with the loader by providing a different
3313              * default load address.
3314              */
3315             load_addr += elf_et_dyn_base;
3316 
3317             /*
3318              * TODO: Better support for mmap alignment is desirable.
3319              * Since we do not have complete control over the guest
3320              * address space, we prefer the kernel to choose some address
3321              * rather than force the use of LOAD_ADDR via MAP_FIXED.
3322              * But without MAP_FIXED we cannot guarantee alignment,
3323              * only suggest it.
3324              */
3325             align = pow2ceil(info->alignment);
3326             if (align) {
3327                 load_addr &= -align;
3328             }
3329         }
3330     }
3331 
3332     /*
3333      * Reserve address space for all of this.
3334      *
3335      * In the case of ET_EXEC, we supply MAP_FIXED_NOREPLACE so that we get
3336      * exactly the address range that is required.  Without reserved_va,
3337      * the guest address space is not isolated.  We have attempted to avoid
3338      * conflict with the host program itself via probe_guest_base, but using
3339      * MAP_FIXED_NOREPLACE instead of MAP_FIXED provides an extra check.
3340      *
3341      * Otherwise this is ET_DYN, and we are searching for a location
3342      * that can hold the memory space required.  If the image is
3343      * pre-linked, LOAD_ADDR will be non-zero, and the kernel should
3344      * honor that address if it happens to be free.
3345      *
3346      * In both cases, we will overwrite pages in this range with mappings
3347      * from the executable.
3348      */
3349     load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
3350                             MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
3351                             (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0),
3352                             -1, 0);
3353     if (load_addr == -1) {
3354         goto exit_mmap;
3355     }
3356     load_bias = load_addr - loaddr;
3357 
3358     if (elf_is_fdpic(ehdr)) {
3359         struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
3360             g_malloc(sizeof(*loadsegs) * info->nsegs);
3361 
3362         for (i = 0; i < ehdr->e_phnum; ++i) {
3363             switch (phdr[i].p_type) {
3364             case PT_DYNAMIC:
3365                 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
3366                 break;
3367             case PT_LOAD:
3368                 loadsegs->addr = phdr[i].p_vaddr + load_bias;
3369                 loadsegs->p_vaddr = phdr[i].p_vaddr;
3370                 loadsegs->p_memsz = phdr[i].p_memsz;
3371                 ++loadsegs;
3372                 break;
3373             }
3374         }
3375     }
3376 
3377     info->load_bias = load_bias;
3378     info->code_offset = load_bias;
3379     info->data_offset = load_bias;
3380     info->load_addr = load_addr;
3381     info->entry = ehdr->e_entry + load_bias;
3382     info->start_code = -1;
3383     info->end_code = 0;
3384     info->start_data = -1;
3385     info->end_data = 0;
3386     /* Usual start for brk is after all sections of the main executable. */
3387     info->brk = TARGET_PAGE_ALIGN(hiaddr + load_bias);
3388     info->elf_flags = ehdr->e_flags;
3389 
3390     prot_exec = PROT_EXEC;
3391 #ifdef TARGET_AARCH64
3392     /*
3393      * If the BTI feature is present, this indicates that the executable
3394      * pages of the startup binary should be mapped with PROT_BTI, so that
3395      * branch targets are enforced.
3396      *
3397      * The startup binary is either the interpreter or the static executable.
3398      * The interpreter is responsible for all pages of a dynamic executable.
3399      *
3400      * ELF notes are backwards compatible with older CPUs.
3401      * Do not enable BTI unless it is supported.
3402      */
3403     if ((info->note_flags & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
3404         && (pinterp_name == NULL || *pinterp_name == 0)
3405         && cpu_isar_feature(aa64_bti, ARM_CPU(thread_cpu))) {
3406         prot_exec |= TARGET_PROT_BTI;
3407     }
3408 #endif
3409 
3410     for (i = 0; i < ehdr->e_phnum; i++) {
3411         struct elf_phdr *eppnt = phdr + i;
3412         if (eppnt->p_type == PT_LOAD) {
3413             abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
3414             int elf_prot = 0;
3415 
3416             if (eppnt->p_flags & PF_R) {
3417                 elf_prot |= PROT_READ;
3418             }
3419             if (eppnt->p_flags & PF_W) {
3420                 elf_prot |= PROT_WRITE;
3421             }
3422             if (eppnt->p_flags & PF_X) {
3423                 elf_prot |= prot_exec;
3424             }
3425 
3426             vaddr = load_bias + eppnt->p_vaddr;
3427             vaddr_po = vaddr & ~TARGET_PAGE_MASK;
3428             vaddr_ps = vaddr & TARGET_PAGE_MASK;
3429 
3430             vaddr_ef = vaddr + eppnt->p_filesz;
3431             vaddr_em = vaddr + eppnt->p_memsz;
3432 
3433             /*
3434              * Some segments may be completely empty, with a non-zero p_memsz
3435              * but no backing file segment.
3436              */
3437             if (eppnt->p_filesz != 0) {
3438                 error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
3439                                     elf_prot, MAP_PRIVATE | MAP_FIXED,
3440                                     image_fd, eppnt->p_offset - vaddr_po);
3441                 if (error == -1) {
3442                     goto exit_mmap;
3443                 }
3444             }
3445 
3446             /* If the load segment requests extra zeros (e.g. bss), map it. */
3447             if (vaddr_ef < vaddr_em &&
3448                 !zero_bss(vaddr_ef, vaddr_em, elf_prot, &err)) {
3449                 goto exit_errmsg;
3450             }
3451 
3452             /* Find the full program boundaries.  */
3453             if (elf_prot & PROT_EXEC) {
3454                 if (vaddr < info->start_code) {
3455                     info->start_code = vaddr;
3456                 }
3457                 if (vaddr_ef > info->end_code) {
3458                     info->end_code = vaddr_ef;
3459                 }
3460             }
3461             if (elf_prot & PROT_WRITE) {
3462                 if (vaddr < info->start_data) {
3463                     info->start_data = vaddr;
3464                 }
3465                 if (vaddr_ef > info->end_data) {
3466                     info->end_data = vaddr_ef;
3467                 }
3468             }
3469 #ifdef TARGET_MIPS
3470         } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) {
3471             Mips_elf_abiflags_v0 abiflags;
3472             if (eppnt->p_filesz < sizeof(Mips_elf_abiflags_v0)) {
3473                 error_setg(&err, "Invalid PT_MIPS_ABIFLAGS entry");
3474                 goto exit_errmsg;
3475             }
3476             if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
3477                 memcpy(&abiflags, bprm_buf + eppnt->p_offset,
3478                        sizeof(Mips_elf_abiflags_v0));
3479             } else {
3480                 retval = pread(image_fd, &abiflags, sizeof(Mips_elf_abiflags_v0),
3481                                eppnt->p_offset);
3482                 if (retval != sizeof(Mips_elf_abiflags_v0)) {
3483                     goto exit_read;
3484                 }
3485             }
3486             bswap_mips_abiflags(&abiflags);
3487             info->fp_abi = abiflags.fp_abi;
3488 #endif
3489         }
3490     }
3491 
3492     if (info->end_data == 0) {
3493         info->start_data = info->end_code;
3494         info->end_data = info->end_code;
3495     }
3496 
3497     if (qemu_log_enabled()) {
3498         load_symbols(ehdr, image_fd, load_bias);
3499     }
3500 
3501     debuginfo_report_elf(image_name, image_fd, load_bias);
3502 
3503     mmap_unlock();
3504 
3505     close(image_fd);
3506     return;
3507 
3508  exit_read:
3509     if (retval >= 0) {
3510         error_setg(&err, "Incomplete read of file header");
3511     } else {
3512         error_setg_errno(&err, errno, "Error reading file header");
3513     }
3514     goto exit_errmsg;
3515  exit_mmap:
3516     error_setg_errno(&err, errno, "Error mapping file");
3517     goto exit_errmsg;
3518  exit_errmsg:
3519     error_reportf_err(err, "%s: ", image_name);
3520     exit(-1);
3521 }
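/*
 * A minimal sketch (not used by the loader) of the ET_DYN base
 * alignment applied in load_elf_image() above: info->alignment is the
 * OR of every segment's p_align, so pow2ceil() of it is the strictest
 * power-of-two alignment any segment requested.
 */
#if 0
static abi_ulong example_align_dyn_base(abi_ulong base, abi_ulong alignment)
{
    abi_ulong align = pow2ceil(alignment);

    /* Round the proposed base down to the requested alignment. */
    return align ? base & -align : base;
}
#endif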
3522 
3523 static void load_elf_interp(const char *filename, struct image_info *info,
3524                             char bprm_buf[BPRM_BUF_SIZE])
3525 {
3526     int fd, retval;
3527     Error *err = NULL;
3528 
3529     fd = open(path(filename), O_RDONLY);
3530     if (fd < 0) {
3531         error_setg_file_open(&err, errno, filename);
3532         error_report_err(err);
3533         exit(-1);
3534     }
3535 
3536     retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
3537     if (retval < 0) {
3538         error_setg_errno(&err, errno, "Error reading file header");
3539         error_reportf_err(err, "%s: ", filename);
3540         exit(-1);
3541     }
3542 
3543     if (retval < BPRM_BUF_SIZE) {
3544         memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
3545     }
3546 
3547     load_elf_image(filename, fd, info, NULL, bprm_buf);
3548 }
3549 
3550 static int symfind(const void *s0, const void *s1)
3551 {
3552     struct elf_sym *sym = (struct elf_sym *)s1;
3553     __typeof(sym->st_value) addr = *(uint64_t *)s0;
3554     int result = 0;
3555 
3556     if (addr < sym->st_value) {
3557         result = -1;
3558     } else if (addr >= sym->st_value + sym->st_size) {
3559         result = 1;
3560     }
3561     return result;
3562 }
3563 
3564 static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr)
3565 {
3566 #if ELF_CLASS == ELFCLASS32
3567     struct elf_sym *syms = s->disas_symtab.elf32;
3568 #else
3569     struct elf_sym *syms = s->disas_symtab.elf64;
3570 #endif
3571 
3572     /* Binary search for the symbol containing orig_addr. */
3573     struct elf_sym *sym;
3574 
3575     sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
3576     if (sym != NULL) {
3577         return s->disas_strtab + sym->st_name;
3578     }
3579 
3580     return "";
3581 }
3582 
3583 /* FIXME: This should use elf_ops.h  */
3584 static int symcmp(const void *s0, const void *s1)
3585 {
3586     struct elf_sym *sym0 = (struct elf_sym *)s0;
3587     struct elf_sym *sym1 = (struct elf_sym *)s1;
3588     return (sym0->st_value < sym1->st_value)
3589         ? -1
3590         : ((sym0->st_value > sym1->st_value) ? 1 : 0);
3591 }
3592 
3593 /* Best attempt to load symbols from this ELF object. */
3594 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
3595 {
3596     int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
3597     uint64_t segsz;
3598     struct elf_shdr *shdr;
3599     char *strings = NULL;
3600     struct syminfo *s = NULL;
3601     struct elf_sym *new_syms, *syms = NULL;
3602 
3603     shnum = hdr->e_shnum;
3604     i = shnum * sizeof(struct elf_shdr);
3605     shdr = (struct elf_shdr *)alloca(i);
3606     if (pread(fd, shdr, i, hdr->e_shoff) != i) {
3607         return;
3608     }
3609 
3610     bswap_shdr(shdr, shnum);
3611     for (i = 0; i < shnum; ++i) {
3612         if (shdr[i].sh_type == SHT_SYMTAB) {
3613             sym_idx = i;
3614             str_idx = shdr[i].sh_link;
3615             goto found;
3616         }
3617     }
3618 
3619     /* There will be no symbol table if the file was stripped.  */
3620     return;
3621 
3622  found:
3623     /* Now we know where the strtab and symtab are.  Snarf them.  */
3624     s = g_try_new(struct syminfo, 1);
3625     if (!s) {
3626         goto give_up;
3627     }
3628 
3629     segsz = shdr[str_idx].sh_size;
3630     s->disas_strtab = strings = g_try_malloc(segsz);
3631     if (!strings ||
3632         pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) {
3633         goto give_up;
3634     }
3635 
3636     segsz = shdr[sym_idx].sh_size;
3637     syms = g_try_malloc(segsz);
3638     if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) {
3639         goto give_up;
3640     }
3641 
3642     if (segsz / sizeof(struct elf_sym) > INT_MAX) {
3643         /* Implausibly large symbol table: give up rather than ploughing
3644          * on with the number of symbols calculation overflowing
3645          */
3646         goto give_up;
3647     }
3648     nsyms = segsz / sizeof(struct elf_sym);
3649     for (i = 0; i < nsyms; ) {
3650         bswap_sym(syms + i);
3651         /* Throw away entries which we do not need.  */
3652         if (syms[i].st_shndx == SHN_UNDEF
3653             || syms[i].st_shndx >= SHN_LORESERVE
3654             || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
3655             if (i < --nsyms) {
3656                 syms[i] = syms[nsyms];
3657             }
3658         } else {
3659 #if defined(TARGET_ARM) || defined(TARGET_MIPS)
3660             /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
3661             syms[i].st_value &= ~(target_ulong)1;
3662 #endif
3663             syms[i].st_value += load_bias;
3664             i++;
3665         }
3666     }
3667 
3668     /* No "useful" symbol.  */
3669     if (nsyms == 0) {
3670         goto give_up;
3671     }
3672 
3673     /* Attempt to free the storage associated with the local symbols
3674        that we threw away.  Whether or not this has any effect on the
3675        memory allocation depends on the malloc implementation and how
3676        many symbols we managed to discard.  */
3677     new_syms = g_try_renew(struct elf_sym, syms, nsyms);
3678     if (new_syms == NULL) {
3679         goto give_up;
3680     }
3681     syms = new_syms;
3682 
3683     qsort(syms, nsyms, sizeof(*syms), symcmp);
3684 
3685     s->disas_num_syms = nsyms;
3686 #if ELF_CLASS == ELFCLASS32
3687     s->disas_symtab.elf32 = syms;
3688 #else
3689     s->disas_symtab.elf64 = syms;
3690 #endif
3691     s->lookup_symbol = lookup_symbolxx;
3692     s->next = syminfos;
3693     syminfos = s;
3694 
3695     return;
3696 
3697 give_up:
3698     g_free(s);
3699     g_free(strings);
3700     g_free(syms);
3701 }
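/*
 * Illustrative only: the unordered in-place filtering idiom used by
 * load_symbols() above.  A rejected entry is overwritten by the
 * current last entry and the count shrinks; i advances only past
 * entries that are kept.  Order is destroyed, hence the qsort() above.
 */
#if 0
static int example_filter_out_even(int *v, int n)
{
    for (int i = 0; i < n; ) {
        if (v[i] % 2 == 0) {
            /* Reject: replace with the last entry, do not advance i. */
            if (i < --n) {
                v[i] = v[n];
            }
        } else {
            i++;
        }
    }
    return n;
}
#endif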
3702 
3703 uint32_t get_elf_eflags(int fd)
3704 {
3705     struct elfhdr ehdr;
3706     off_t offset;
3707     int ret;
3708 
3709     /* Read ELF header */
3710     offset = lseek(fd, 0, SEEK_SET);
3711     if (offset == (off_t) -1) {
3712         return 0;
3713     }
3714     ret = read(fd, &ehdr, sizeof(ehdr));
3715     if (ret < sizeof(ehdr)) {
3716         return 0;
3717     }
3718     offset = lseek(fd, offset, SEEK_SET);
3719     if (offset == (off_t) -1) {
3720         return 0;
3721     }
3722 
3723     /* Check ELF signature */
3724     if (!elf_check_ident(&ehdr)) {
3725         return 0;
3726     }
3727 
3728     /* check header */
3729     bswap_ehdr(&ehdr);
3730     if (!elf_check_ehdr(&ehdr)) {
3731         return 0;
3732     }
3733 
3734     /* return the ELF flags */
3735     return ehdr.e_flags;
3736 }
3737 
3738 int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
3739 {
3740     struct image_info interp_info;
3741     struct elfhdr elf_ex;
3742     char *elf_interpreter = NULL;
3743     char *scratch;
3744 
3745     memset(&interp_info, 0, sizeof(interp_info));
3746 #ifdef TARGET_MIPS
3747     interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
3748 #endif
3749 
3750     load_elf_image(bprm->filename, bprm->fd, info,
3751                    &elf_interpreter, bprm->buf);
3752 
3753     /* ??? We need a copy of the elf header for passing to create_elf_tables.
3754        If we do nothing, we'll have overwritten this when we re-use bprm->buf
3755        when we load the interpreter.  */
3756     elf_ex = *(struct elfhdr *)bprm->buf;
3757 
3758     /* Do this so that we can load the interpreter, if need be.  We will
3759        change some of these later */
3760     bprm->p = setup_arg_pages(bprm, info);
3761 
3762     scratch = g_new0(char, TARGET_PAGE_SIZE);
3763     if (STACK_GROWS_DOWN) {
3764         bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
3765                                    bprm->p, info->stack_limit);
3766         info->file_string = bprm->p;
3767         bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
3768                                    bprm->p, info->stack_limit);
3769         info->env_strings = bprm->p;
3770         bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
3771                                    bprm->p, info->stack_limit);
3772         info->arg_strings = bprm->p;
3773     } else {
3774         info->arg_strings = bprm->p;
3775         bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
3776                                    bprm->p, info->stack_limit);
3777         info->env_strings = bprm->p;
3778         bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
3779                                    bprm->p, info->stack_limit);
3780         info->file_string = bprm->p;
3781         bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
3782                                    bprm->p, info->stack_limit);
3783     }
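    /*
     * In both cases the blocks end up in the same relative order on the
     * stack (arguments lowest, then environment, then filename); only
     * the copy order differs, so that bprm->p always moves away from
     * the data already copied.
     */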
3784 
3785     g_free(scratch);
3786 
3787     if (!bprm->p) {
3788         fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
3789         exit(-1);
3790     }
3791 
3792     if (elf_interpreter) {
3793         load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
3794 
3795         /*
3796          * While unusual because of ELF_ET_DYN_BASE, if we are unlucky
3797          * with the mappings the interpreter can be loaded above but
3798          * near the main executable, which can leave very little room
3799          * for the heap.
3800          * If less than 16 MiB remains between the current brk and the
3801          * interpreter, use the end of the interpreter instead.
3802          */
3803         if (interp_info.brk > info->brk &&
3804             interp_info.load_bias - info->brk < 16 * MiB)  {
3805             info->brk = interp_info.brk;
3806         }
3807 
3808         /* If the program interpreter is one of these two, then assume
3809            an iBCS2 image.  Otherwise assume a native linux image.  */
3810 
3811         if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
3812             || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
3813             info->personality = PER_SVR4;
3814 
3815             /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
3816                and some applications "depend" upon this behavior.  Since
3817                we do not have the power to recompile these, we emulate
3818                the SVr4 behavior.  Sigh.  */
3819             target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
3820                         MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
3821         }
3822 #ifdef TARGET_MIPS
3823         info->interp_fp_abi = interp_info.fp_abi;
3824 #endif
3825     }
3826 
3827     /*
3828      * TODO: load a vdso, which would also contain the signal trampolines.
3829      * Otherwise, allocate a private page to hold them.
3830      */
3831     if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) {
3832         abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE,
3833                                           PROT_READ | PROT_WRITE,
3834                                           MAP_PRIVATE | MAP_ANON, -1, 0);
3835         if (tramp_page == -1) {
3836             return -errno;
3837         }
3838 
3839         setup_sigtramp(tramp_page);
3840         target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC);
3841     }
3842 
3843     bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
3844                                 info, (elf_interpreter ? &interp_info : NULL));
3845     info->start_stack = bprm->p;
3846 
3847     /* If we have an interpreter, set that as the program's entry point.
3848        Copy the load_bias as well, to help PPC64 interpret the entry
3849        point as a function descriptor.  Do this after creating elf tables
3850        so that we copy the original program entry point into the AUXV.  */
3851     if (elf_interpreter) {
3852         info->load_bias = interp_info.load_bias;
3853         info->entry = interp_info.entry;
3854         g_free(elf_interpreter);
3855     }
3856 
3857 #ifdef USE_ELF_CORE_DUMP
3858     bprm->core_dump = &elf_core_dump;
3859 #endif
3860 
3861     return 0;
3862 }
3863 
3864 #ifdef USE_ELF_CORE_DUMP
3865 /*
3866  * Definitions to generate Intel SVR4-like core files.
3867  * These mostly have the same names as the SVR4 types with "target_elf_"
3868  * tacked on the front to prevent clashes with linux definitions,
3869  * and the typedef forms have been avoided.  This is mostly like
3870  * the SVR4 structure, but more Linuxy, with things that Linux does
3871  * not support and which gdb doesn't really use excluded.
3872  *
3873  * Fields we don't dump (their contents are zero) in linux-user qemu
3874  * are marked with XXX.
3875  *
3876  * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
3877  *
3878  * Porting ELF coredump to a new target is a (quite) simple process.  First,
3879  * define USE_ELF_CORE_DUMP in the target ELF code (where init_thread() for
3880  * the target resides):
3881  *
3882  * #define USE_ELF_CORE_DUMP
3883  *
3884  * Next, define the type of the register set used for dumping.  The ELF
3885  * specification says it must be an array of elf_greg_t with ELF_NREG elements.
3886  *
3887  * typedef <target_regtype> target_elf_greg_t;
3888  * #define ELF_NREG <number of registers>
3889  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
3890  *
3891  * The last step is to implement a target-specific function that copies
3892  * registers from the given cpu into the register set above.  The prototype is:
3893  *
3894  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
3895  *                                const CPUArchState *env);
3896  *
3897  * Parameters:
3898  *     regs - copy register values into here (allocated and zeroed by caller)
3899  *     env - copy registers from here
3900  *
3901  * An example for the ARM target is provided in this file.
3902  */
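/*
 * A hypothetical illustration of the three steps above, for an
 * imaginary target with sixteen general registers and a pc.  The
 * env->regs[] and env->pc fields are assumptions for the example,
 * not a real CPUArchState layout.
 */
#if 0
#define USE_ELF_CORE_DUMP
typedef abi_ulong target_elf_greg_t;
#define ELF_NREG 17
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUArchState *env)
{
    int i;

    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapreg(env->regs[i]);   /* fix target byte order */
    }
    (*regs)[16] = tswapreg(env->pc);
}
#endif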
3903 
3904 /* An ELF note in memory */
3905 struct memelfnote {
3906     const char *name;
3907     size_t     namesz;
3908     size_t     namesz_rounded;
3909     int        type;
3910     size_t     datasz;
3911     size_t     datasz_rounded;
3912     void       *data;
3913     size_t     notesz;
3914 };
3915 
3916 struct target_elf_siginfo {
3917     abi_int    si_signo; /* signal number */
3918     abi_int    si_code;  /* extra code */
3919     abi_int    si_errno; /* errno */
3920 };
3921 
3922 struct target_elf_prstatus {
3923     struct target_elf_siginfo pr_info;      /* Info associated with signal */
3924     abi_short          pr_cursig;    /* Current signal */
3925     abi_ulong          pr_sigpend;   /* XXX */
3926     abi_ulong          pr_sighold;   /* XXX */
3927     target_pid_t       pr_pid;
3928     target_pid_t       pr_ppid;
3929     target_pid_t       pr_pgrp;
3930     target_pid_t       pr_sid;
3931     struct target_timeval pr_utime;  /* XXX User time */
3932     struct target_timeval pr_stime;  /* XXX System time */
3933     struct target_timeval pr_cutime; /* XXX Cumulative user time */
3934     struct target_timeval pr_cstime; /* XXX Cumulative system time */
3935     target_elf_gregset_t      pr_reg;       /* GP registers */
3936     abi_int            pr_fpvalid;   /* XXX */
3937 };
3938 
3939 #define ELF_PRARGSZ     (80) /* Number of chars for args */
3940 
3941 struct target_elf_prpsinfo {
3942     char         pr_state;       /* numeric process state */
3943     char         pr_sname;       /* char for pr_state */
3944     char         pr_zomb;        /* zombie */
3945     char         pr_nice;        /* nice val */
3946     abi_ulong    pr_flag;        /* flags */
3947     target_uid_t pr_uid;
3948     target_gid_t pr_gid;
3949     target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
3950     /* Lots missing */
3951     char    pr_fname[16] QEMU_NONSTRING; /* filename of executable */
3952     char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
3953 };
3954 
3955 /* Here is the structure in which the status of each thread is captured. */
3956 struct elf_thread_status {
3957     QTAILQ_ENTRY(elf_thread_status)  ets_link;
3958     struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
3959 #if 0
3960     elf_fpregset_t fpu;             /* NT_PRFPREG */
3961     struct task_struct *thread;
3962     elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
3963 #endif
3964     struct memelfnote notes[1];
3965     int num_notes;
3966 };
3967 
3968 struct elf_note_info {
3969     struct memelfnote   *notes;
3970     struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
3971     struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
3972 
3973     QTAILQ_HEAD(, elf_thread_status) thread_list;
3974 #if 0
3975     /*
3976      * Current version of ELF coredump doesn't support
3977      * dumping fp regs etc.
3978      */
3979     elf_fpregset_t *fpu;
3980     elf_fpxregset_t *xfpu;
3981     int thread_status_size;
3982 #endif
3983     int notes_size;
3984     int numnote;
3985 };
3986 
3987 struct vm_area_struct {
3988     target_ulong   vma_start;  /* start vaddr of memory region */
3989     target_ulong   vma_end;    /* end vaddr of memory region */
3990     abi_ulong      vma_flags;  /* protection etc. flags for the region */
3991     QTAILQ_ENTRY(vm_area_struct) vma_link;
3992 };
3993 
3994 struct mm_struct {
3995     QTAILQ_HEAD(, vm_area_struct) mm_mmap;
3996     int mm_count;           /* number of mappings */
3997 };
3998 
3999 static struct mm_struct *vma_init(void);
4000 static void vma_delete(struct mm_struct *);
4001 static int vma_add_mapping(struct mm_struct *, target_ulong,
4002                            target_ulong, abi_ulong);
4003 static int vma_get_mapping_count(const struct mm_struct *);
4004 static struct vm_area_struct *vma_first(const struct mm_struct *);
4005 static struct vm_area_struct *vma_next(struct vm_area_struct *);
4006 static abi_ulong vma_dump_size(const struct vm_area_struct *);
4007 static int vma_walker(void *priv, target_ulong start, target_ulong end,
4008                       unsigned long flags);
4009 
4010 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
4011 static void fill_note(struct memelfnote *, const char *, int,
4012                       unsigned int, void *);
4013 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
4014 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
4015 static void fill_auxv_note(struct memelfnote *, const TaskState *);
4016 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
4017 static size_t note_size(const struct memelfnote *);
4018 static void free_note_info(struct elf_note_info *);
4019 static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
4020 static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
4021 
4022 static int dump_write(int, const void *, size_t);
4023 static int write_note(struct memelfnote *, int);
4024 static int write_note_info(struct elf_note_info *, int);
4025 
4026 #ifdef BSWAP_NEEDED
4027 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
4028 {
4029     prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
4030     prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
4031     prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
4032     prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
4033     prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend);
4034     prstatus->pr_sighold = tswapal(prstatus->pr_sighold);
4035     prstatus->pr_pid = tswap32(prstatus->pr_pid);
4036     prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
4037     prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
4038     prstatus->pr_sid = tswap32(prstatus->pr_sid);
4039     /* cpu times are not filled, so we skip them */
4040     /* regs should be in correct format already */
4041     prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
4042 }
4043 
4044 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
4045 {
4046     psinfo->pr_flag = tswapal(psinfo->pr_flag);
4047     psinfo->pr_uid = tswap16(psinfo->pr_uid);
4048     psinfo->pr_gid = tswap16(psinfo->pr_gid);
4049     psinfo->pr_pid = tswap32(psinfo->pr_pid);
4050     psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
4051     psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
4052     psinfo->pr_sid = tswap32(psinfo->pr_sid);
4053 }
4054 
4055 static void bswap_note(struct elf_note *en)
4056 {
4057     bswap32s(&en->n_namesz);
4058     bswap32s(&en->n_descsz);
4059     bswap32s(&en->n_type);
4060 }
4061 #else
4062 static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
4063 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
4064 static inline void bswap_note(struct elf_note *en) { }
4065 #endif /* BSWAP_NEEDED */
4066 
4067 /*
4068  * Minimal support for Linux memory regions.  These are needed
4069  * when we are finding out exactly what memory belongs to the
4070  * emulated process.  No locks are needed here, as long as the
4071  * thread that received the signal is stopped.
4072  */
4073 
4074 static struct mm_struct *vma_init(void)
4075 {
4076     struct mm_struct *mm;
4077 
4078     if ((mm = g_malloc(sizeof (*mm))) == NULL)
4079         return (NULL);
4080 
4081     mm->mm_count = 0;
4082     QTAILQ_INIT(&mm->mm_mmap);
4083 
4084     return (mm);
4085 }
4086 
4087 static void vma_delete(struct mm_struct *mm)
4088 {
4089     struct vm_area_struct *vma;
4090 
4091     while ((vma = vma_first(mm)) != NULL) {
4092         QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
4093         g_free(vma);
4094     }
4095     g_free(mm);
4096 }
4097 
4098 static int vma_add_mapping(struct mm_struct *mm, target_ulong start,
4099                            target_ulong end, abi_ulong flags)
4100 {
4101     struct vm_area_struct *vma;
4102 
4103     if ((vma = g_malloc0(sizeof (*vma))) == NULL)
4104         return (-1);
4105 
4106     vma->vma_start = start;
4107     vma->vma_end = end;
4108     vma->vma_flags = flags;
4109 
4110     QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
4111     mm->mm_count++;
4112 
4113     return (0);
4114 }
4115 
4116 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
4117 {
4118     return (QTAILQ_FIRST(&mm->mm_mmap));
4119 }
4120 
4121 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
4122 {
4123     return (QTAILQ_NEXT(vma, vma_link));
4124 }
4125 
4126 static int vma_get_mapping_count(const struct mm_struct *mm)
4127 {
4128     return (mm->mm_count);
4129 }
4130 
4131 /*
4132  * Calculate the file (dump) size of a given memory region.
4133  */
4134 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
4135 {
4136     /* if we cannot even read the first page, skip it */
4137     if (!access_ok_untagged(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
4138         return (0);
4139 
4140     /*
4141      * Usually we don't dump executable pages, as they contain
4142      * non-writable code that a debugger can read directly from the
4143      * target library etc.  However, thread stacks are also marked
4144      * executable, so we read in the first page of a given region
4145      * and check whether it contains an ELF header.  If there is
4146      * no ELF header, we dump the region.
4147      */
4148     if (vma->vma_flags & PROT_EXEC) {
4149         char page[TARGET_PAGE_SIZE];
4150 
4151         if (copy_from_user(page, vma->vma_start, sizeof (page))) {
4152             return 0;
4153         }
4154         if ((page[EI_MAG0] == ELFMAG0) &&
4155             (page[EI_MAG1] == ELFMAG1) &&
4156             (page[EI_MAG2] == ELFMAG2) &&
4157             (page[EI_MAG3] == ELFMAG3)) {
4158             /*
4159              * The mapping is probably backed by an ELF binary.
4160              * Don't dump it.
4161              */
4162             return (0);
4163         }
4164     }
4165 
4166     return (vma->vma_end - vma->vma_start);
4167 }
4168 
4169 static int vma_walker(void *priv, target_ulong start, target_ulong end,
4170                       unsigned long flags)
4171 {
4172     struct mm_struct *mm = (struct mm_struct *)priv;
4173 
4174     vma_add_mapping(mm, start, end, flags);
4175     return (0);
4176 }
4177 
4178 static void fill_note(struct memelfnote *note, const char *name, int type,
4179                       unsigned int sz, void *data)
4180 {
4181     unsigned int namesz;
4182 
4183     namesz = strlen(name) + 1;
4184     note->name = name;
4185     note->namesz = namesz;
4186     note->namesz_rounded = roundup(namesz, sizeof (int32_t));
4187     note->type = type;
4188     note->datasz = sz;
4189     note->datasz_rounded = roundup(sz, sizeof (int32_t));
4190 
4191     note->data = data;
4192 
4193     /*
4194      * We calculate the rounded-up note size here, as specified by
4195      * the ELF document.
4196      */
4197     note->notesz = sizeof (struct elf_note) +
4198         note->namesz_rounded + note->datasz_rounded;
4199 }
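/*
 * Worked example of the calculation above: for the name "CORE" and a
 * 4-byte payload, namesz = 5 rounds up to 8, datasz = 4 stays 4, so
 * notesz = sizeof(struct elf_note) + 8 + 4.
 */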
4200 
4201 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
4202                             uint32_t flags)
4203 {
4204     (void) memset(elf, 0, sizeof(*elf));
4205 
4206     (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
4207     elf->e_ident[EI_CLASS] = ELF_CLASS;
4208     elf->e_ident[EI_DATA] = ELF_DATA;
4209     elf->e_ident[EI_VERSION] = EV_CURRENT;
4210     elf->e_ident[EI_OSABI] = ELF_OSABI;
4211 
4212     elf->e_type = ET_CORE;
4213     elf->e_machine = machine;
4214     elf->e_version = EV_CURRENT;
4215     elf->e_phoff = sizeof(struct elfhdr);
4216     elf->e_flags = flags;
4217     elf->e_ehsize = sizeof(struct elfhdr);
4218     elf->e_phentsize = sizeof(struct elf_phdr);
4219     elf->e_phnum = segs;
4220 
4221     bswap_ehdr(elf);
4222 }
4223 
4224 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
4225 {
4226     phdr->p_type = PT_NOTE;
4227     phdr->p_offset = offset;
4228     phdr->p_vaddr = 0;
4229     phdr->p_paddr = 0;
4230     phdr->p_filesz = sz;
4231     phdr->p_memsz = 0;
4232     phdr->p_flags = 0;
4233     phdr->p_align = 0;
4234 
4235     bswap_phdr(phdr, 1);
4236 }
4237 
4238 static size_t note_size(const struct memelfnote *note)
4239 {
4240     return (note->notesz);
4241 }
4242 
4243 static void fill_prstatus(struct target_elf_prstatus *prstatus,
4244                           const TaskState *ts, int signr)
4245 {
4246     (void) memset(prstatus, 0, sizeof (*prstatus));
4247     prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
4248     prstatus->pr_pid = ts->ts_tid;
4249     prstatus->pr_ppid = getppid();
4250     prstatus->pr_pgrp = getpgrp();
4251     prstatus->pr_sid = getsid(0);
4252 
4253     bswap_prstatus(prstatus);
4254 }
4255 
4256 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
4257 {
4258     char *base_filename;
4259     unsigned int i, len;
4260 
4261     (void) memset(psinfo, 0, sizeof (*psinfo));
4262 
4263     len = ts->info->env_strings - ts->info->arg_strings;
4264     if (len >= ELF_PRARGSZ)
4265         len = ELF_PRARGSZ - 1;
4266     if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_strings, len)) {
4267         return -EFAULT;
4268     }
4269     for (i = 0; i < len; i++)
4270         if (psinfo->pr_psargs[i] == 0)
4271             psinfo->pr_psargs[i] = ' ';
4272     psinfo->pr_psargs[len] = 0;
4273 
4274     psinfo->pr_pid = getpid();
4275     psinfo->pr_ppid = getppid();
4276     psinfo->pr_pgrp = getpgrp();
4277     psinfo->pr_sid = getsid(0);
4278     psinfo->pr_uid = getuid();
4279     psinfo->pr_gid = getgid();
4280 
4281     base_filename = g_path_get_basename(ts->bprm->filename);
4282     /*
4283      * Using strncpy here is fine: at max-length,
4284      * this field is not NUL-terminated.
4285      */
4286     (void) strncpy(psinfo->pr_fname, base_filename,
4287                    sizeof(psinfo->pr_fname));
4288 
4289     g_free(base_filename);
4290     bswap_psinfo(psinfo);
4291     return (0);
4292 }
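/*
 * For example, the argument block "ls\0-l\0/tmp\0" copied above is
 * rendered as pr_psargs "ls -l /tmp " followed by a terminator: every
 * NUL within the copied length, including the final one, becomes a space.
 */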
4293 
4294 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
4295 {
4296     elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
4297     elf_addr_t orig_auxv = auxv;
4298     void *ptr;
4299     int len = ts->info->auxv_len;
4300 
4301     /*
4302      * The auxiliary vector is stored on the target process stack.  It
4303      * contains {type, value} pairs that we need to dump into the note.
4304      * This is not strictly necessary, but we do it for completeness.
4305      */
4306 
4307     /* read in the whole auxv vector and copy it to the memelfnote */
4308     ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
4309     if (ptr != NULL) {
4310         fill_note(note, "CORE", NT_AUXV, len, ptr);
4311         unlock_user(ptr, auxv, len);
4312     }
4313 }
4314 
4315 /*
4316  * Constructs the name of the coredump file.  We use the following
4317  * convention for the name:
4318  *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
4319  *
4320  * Returns the filename.
4321  */
4322 static char *core_dump_filename(const TaskState *ts)
4323 {
4324     g_autoptr(GDateTime) now = g_date_time_new_now_local();
4325     g_autofree char *nowstr = g_date_time_format(now, "%Y%m%d-%H%M%S");
4326     g_autofree char *base_filename = g_path_get_basename(ts->bprm->filename);
4327 
4328     return g_strdup_printf("qemu_%s_%s_%d.core",
4329                            base_filename, nowstr, (int)getpid());
4330 }
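/*
 * For example, dumping "/bin/ls" as pid 1234 on 2024-01-02 at 03:04:05
 * would yield "qemu_ls_20240102-030405_1234.core".
 */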
4331 
4332 static int dump_write(int fd, const void *ptr, size_t size)
4333 {
4334     const char *bufp = (const char *)ptr;
4335     ssize_t bytes_written, bytes_left;
4336     struct rlimit dumpsize;
4337     off_t pos;
4338 
4339     bytes_written = 0;
4340     getrlimit(RLIMIT_CORE, &dumpsize);
4341     if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
4342         if (errno == ESPIPE) { /* not a seekable stream */
4343             bytes_left = size;
4344         } else {
4345             return pos;
4346         }
4347     } else {
4348         if (dumpsize.rlim_cur <= pos) {
4349             return -1;
4350         } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
4351             bytes_left = size;
4352         } else {
4353             size_t limit_left = dumpsize.rlim_cur - pos;
4354             bytes_left = limit_left >= size ? size : limit_left;
4355         }
4356     }
4357 
4358     /*
4359      * Under normal conditions a single write(2) should do, but
4360      * in the case of a socket etc. this mechanism is more portable.
4361      */
4362     do {
4363         bytes_written = write(fd, bufp, bytes_left);
4364         if (bytes_written < 0) {
4365             if (errno == EINTR)
4366                 continue;
4367             return (-1);
4368         } else if (bytes_written == 0) { /* eof */
4369             return (-1);
4370         }
4371         bufp += bytes_written;
4372         bytes_left -= bytes_written;
4373     } while (bytes_left > 0);
4374 
4375     return (0);
4376 }
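/*
 * Worked example of the RLIMIT_CORE clamp above: with rlim_cur = 4096
 * and the file position at 4000, a 512-byte request is truncated to
 * bytes_left = 96; once the position reaches the limit, dump_write()
 * returns -1 and the dump is cut short.
 */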
4377 
4378 static int write_note(struct memelfnote *men, int fd)
4379 {
4380     struct elf_note en;
4381 
4382     en.n_namesz = men->namesz;
4383     en.n_type = men->type;
4384     en.n_descsz = men->datasz;
4385 
4386     bswap_note(&en);
4387 
4388     if (dump_write(fd, &en, sizeof(en)) != 0)
4389         return (-1);
4390     if (dump_write(fd, men->name, men->namesz_rounded) != 0)
4391         return (-1);
4392     if (dump_write(fd, men->data, men->datasz_rounded) != 0)
4393         return (-1);
4394 
4395     return (0);
4396 }
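/*
 * Illustrative on-disk layout of one record as written above, for a
 * note named "CORE" with a 4-byte payload:
 *
 *     struct elf_note { n_namesz = 5, n_descsz = 4, n_type }
 *     "CORE\0" plus three bytes of padding    (namesz_rounded = 8)
 *     4 payload bytes                         (datasz_rounded = 4)
 */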
4397 
4398 static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
4399 {
4400     CPUState *cpu = env_cpu((CPUArchState *)env);
4401     TaskState *ts = (TaskState *)cpu->opaque;
4402     struct elf_thread_status *ets;
4403 
4404     ets = g_malloc0(sizeof (*ets));
4405     ets->num_notes = 1; /* only prstatus is dumped */
4406     fill_prstatus(&ets->prstatus, ts, 0);
4407     elf_core_copy_regs(&ets->prstatus.pr_reg, env);
4408     fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
4409               &ets->prstatus);
4410 
4411     QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
4412 
4413     info->notes_size += note_size(&ets->notes[0]);
4414 }
4415 
4416 static void init_note_info(struct elf_note_info *info)
4417 {
4418     /* Initialize the elf_note_info structure so that it is at
4419      * least safe to call free_note_info() on it. Must be
4420      * called before calling fill_note_info().
4421      */
4422     memset(info, 0, sizeof (*info));
4423     QTAILQ_INIT(&info->thread_list);
4424 }
4425 
4426 static int fill_note_info(struct elf_note_info *info,
4427                           long signr, const CPUArchState *env)
4428 {
4429 #define NUMNOTES 3
4430     CPUState *cpu = env_cpu((CPUArchState *)env);
4431     TaskState *ts = (TaskState *)cpu->opaque;
4432     int i;
4433 
4434     info->notes = g_new0(struct memelfnote, NUMNOTES);
4435     if (info->notes == NULL)
4436         return (-ENOMEM);
4437     info->prstatus = g_malloc0(sizeof (*info->prstatus));
4438     if (info->prstatus == NULL)
4439         return (-ENOMEM);
4440     info->psinfo = g_malloc0(sizeof (*info->psinfo));
4441     if (info->psinfo == NULL)
4442         return (-ENOMEM);
4443 
4444     /*
4445      * First fill in the status (and registers) of the current thread,
4446      * including process info & aux vector.
4447      */
4448     fill_prstatus(info->prstatus, ts, signr);
4449     elf_core_copy_regs(&info->prstatus->pr_reg, env);
4450     fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
4451               sizeof (*info->prstatus), info->prstatus);
4452     fill_psinfo(info->psinfo, ts);
4453     fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
4454               sizeof (*info->psinfo), info->psinfo);
4455     fill_auxv_note(&info->notes[2], ts);
4456     info->numnote = 3;
4457 
4458     info->notes_size = 0;
4459     for (i = 0; i < info->numnote; i++)
4460         info->notes_size += note_size(&info->notes[i]);
4461 
4462     /* read and fill status of all threads */
4463     WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
4464         CPU_FOREACH(cpu) {
4465             if (cpu == thread_cpu) {
4466                 continue;
4467             }
4468             fill_thread_info(info, cpu_env(cpu));
4469         }
4470     }
4471 
4472     return (0);
4473 }
4474 
4475 static void free_note_info(struct elf_note_info *info)
4476 {
4477     struct elf_thread_status *ets;
4478 
4479     while (!QTAILQ_EMPTY(&info->thread_list)) {
4480         ets = QTAILQ_FIRST(&info->thread_list);
4481         QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
4482         g_free(ets);
4483     }
4484 
4485     g_free(info->prstatus);
4486     g_free(info->psinfo);
4487     g_free(info->notes);
4488 }
4489 
4490 static int write_note_info(struct elf_note_info *info, int fd)
4491 {
4492     struct elf_thread_status *ets;
4493     int i, error = 0;
4494 
4495     /* write prstatus, psinfo and auxv for current thread */
4496     for (i = 0; i < info->numnote; i++)
4497         if ((error = write_note(&info->notes[i], fd)) != 0)
4498             return (error);
4499 
4500     /* write prstatus for each thread */
4501     QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
4502         if ((error = write_note(&ets->notes[0], fd)) != 0)
4503             return (error);
4504     }
4505 
4506     return (0);
4507 }
4508 
4509 /*
4510  * Write out ELF coredump.
4511  *
4512  * See documentation of ELF object file format in:
4513  * http://www.caldera.com/developers/devspecs/gabi41.pdf
4514  *
4515  * The coredump format in Linux is as follows:
4516  *
4517  * 0   +----------------------+         \
4518  *     | ELF header           | ET_CORE  |
4519  *     +----------------------+          |
4520  *     | ELF program headers  |          |--- headers
4521  *     | - NOTE section       |          |
4522  *     | - PT_LOAD sections   |          |
4523  *     +----------------------+         /
4524  *     | NOTEs:               |
4525  *     | - NT_PRSTATUS        |
4526  *     | - NT_PRPSINFO        |
4527  *     | - NT_AUXV            |
4528  *     +----------------------+ <-- aligned to target page
4529  *     | Process memory dump  |
4530  *     :                      :
4531  *     .                      .
4532  *     :                      :
4533  *     |                      |
4534  *     +----------------------+
4535  *
4536  * NT_PRSTATUS -> struct elf_prstatus (per thread)
4537  * NT_PRPSINFO -> struct elf_prpsinfo
4538  * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
4539  *
4540  * The format follows the System V format as closely as possible.  Current
4541  * version limitations are as follows:
4542  *     - no floating point registers are dumped
4543  *
4544  * Function returns 0 in case of success, negative errno otherwise.
4545  *
4546  * TODO: make this also work at runtime: it should be
4547  * possible to force a coredump from a running process and then
4548  * continue processing.  For example qemu could set up a SIGUSR2
4549  * handler (provided that the target process hasn't registered a
4550  * handler for it) that does the dump when the signal is received.
4551  */
4552 static int elf_core_dump(int signr, const CPUArchState *env)
4553 {
4554     const CPUState *cpu = env_cpu((CPUArchState *)env);
4555     const TaskState *ts = (const TaskState *)cpu->opaque;
4556     struct vm_area_struct *vma = NULL;
4557     g_autofree char *corefile = NULL;
4558     struct elf_note_info info;
4559     struct elfhdr elf;
4560     struct elf_phdr phdr;
4561     struct rlimit dumpsize;
4562     struct mm_struct *mm = NULL;
4563     off_t offset = 0, data_offset = 0;
4564     int segs = 0;
4565     int fd = -1;
4566 
4567     init_note_info(&info);
4568 
4569     errno = 0;
4570     getrlimit(RLIMIT_CORE, &dumpsize);
4571     if (dumpsize.rlim_cur == 0)
4572         return 0;
4573 
4574     corefile = core_dump_filename(ts);
4575 
4576     if ((fd = open(corefile, O_WRONLY | O_CREAT,
4577                    S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
4578         return (-errno);
4579 
4580     /*
4581      * Walk through the target process memory mappings and
4582      * set up a structure containing this information.  After
4583      * this point the vma_xxx functions can be used.
4584      */
4585     if ((mm = vma_init()) == NULL)
4586         goto out;
4587 
4588     walk_memory_regions(mm, vma_walker);
4589     segs = vma_get_mapping_count(mm);
4590 
4591     /*
4592      * Construct valid coredump ELF header.  We also
4593      * add one more segment for notes.
4594      */
4595     fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
4596     if (dump_write(fd, &elf, sizeof (elf)) != 0)
4597         goto out;
4598 
4599     /* fill in the in-memory version of notes */
4600     if (fill_note_info(&info, signr, env) < 0)
4601         goto out;
4602 
4603     offset += sizeof (elf);                             /* elf header */
4604     offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
4605 
4606     /* write out notes program header */
4607     fill_elf_note_phdr(&phdr, info.notes_size, offset);
4608 
4609     offset += info.notes_size;
4610     if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
4611         goto out;
4612 
4613     /*
4614      * The ELF specification wants data to start at a page boundary,
4615      * so we align it here.
4616      */
4617     data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
4618 
4619     /*
4620      * Write program headers for memory regions mapped in
4621      * the target process.
4622      */
4623     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
4624         (void) memset(&phdr, 0, sizeof (phdr));
4625 
4626         phdr.p_type = PT_LOAD;
4627         phdr.p_offset = offset;
4628         phdr.p_vaddr = vma->vma_start;
4629         phdr.p_paddr = 0;
4630         phdr.p_filesz = vma_dump_size(vma);
4631         offset += phdr.p_filesz;
4632         phdr.p_memsz = vma->vma_end - vma->vma_start;
4633         phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
4634         if (vma->vma_flags & PROT_WRITE)
4635             phdr.p_flags |= PF_W;
4636         if (vma->vma_flags & PROT_EXEC)
4637             phdr.p_flags |= PF_X;
4638         phdr.p_align = ELF_EXEC_PAGESIZE;
4639 
4640         bswap_phdr(&phdr, 1);
4641         if (dump_write(fd, &phdr, sizeof(phdr)) != 0) {
4642             goto out;
4643         }
4644     }
4645 
4646     /*
4647      * Next we write the notes just after the program headers.  No
4648      * alignment is needed here.
4649      */
4650     if (write_note_info(&info, fd) < 0)
4651         goto out;
4652 
4653     /* align data to page boundary */
4654     if (lseek(fd, data_offset, SEEK_SET) != data_offset)
4655         goto out;
4656 
4657     /*
4658      * Finally we can dump the process memory into the corefile as well.
4659      */
4660     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
4661         abi_ulong addr;
4662         abi_ulong end;
4663 
4664         end = vma->vma_start + vma_dump_size(vma);
4665 
4666         for (addr = vma->vma_start; addr < end;
4667              addr += TARGET_PAGE_SIZE) {
4668             char page[TARGET_PAGE_SIZE];
4669             int error;
4670 
4671             /*
4672              *  Read in a page from target process memory and
4673              *  write it to the coredump file.
4674              */
4675             error = copy_from_user(page, addr, sizeof (page));
4676             if (error != 0) {
4677                 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
4678                                addr);
4679                 errno = -error;
4680                 goto out;
4681             }
4682             if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
4683                 goto out;
4684         }
4685     }
4686 
4687  out:
4688     free_note_info(&info);
4689     if (mm != NULL)
4690         vma_delete(mm);
4691     (void) close(fd);
4692 
4693     if (errno != 0)
4694         return (-errno);
4695     return (0);
4696 }
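/*
 * Worked example of the offset bookkeeping above, for a process with
 * two mappings (segs = 2):
 *
 *     0                                  ELF header
 *     sizeof(elf)                        3 program headers (notes + 2 loads)
 *     sizeof(elf) + 3 * sizeof(phdr)     note data, info.notes_size bytes
 *     roundup(..., ELF_EXEC_PAGESIZE)    memory dump of both regions
 */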
4697 #endif /* USE_ELF_CORE_DUMP */
4698 
4699 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
4700 {
4701     init_thread(regs, infop);
4702 }
4703