xref: /openbmc/qemu/linux-user/elfload.c (revision e8d1e0cd)
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include "qemu/osdep.h"
3 #include <sys/param.h>
4 
5 #include <sys/resource.h>
6 #include <sys/shm.h>
7 
8 #include "qemu.h"
9 #include "user-internals.h"
10 #include "signal-common.h"
11 #include "loader.h"
12 #include "user-mmap.h"
13 #include "disas/disas.h"
14 #include "qemu/bitops.h"
15 #include "qemu/path.h"
16 #include "qemu/queue.h"
17 #include "qemu/guest-random.h"
18 #include "qemu/units.h"
19 #include "qemu/selfmap.h"
20 #include "qapi/error.h"
21 #include "target_signal.h"
22 #include "accel/tcg/debuginfo.h"
23 
24 #ifdef _ARCH_PPC64
25 #undef ARCH_DLINFO
26 #undef ELF_PLATFORM
27 #undef ELF_HWCAP
28 #undef ELF_HWCAP2
29 #undef ELF_CLASS
30 #undef ELF_DATA
31 #undef ELF_ARCH
32 #endif
33 
34 #define ELF_OSABI   ELFOSABI_SYSV
35 
36 /* from personality.h */
37 
38 /*
39  * Flags for bug emulation.
40  *
41  * These occupy the top three bytes.
42  */
43 enum {
44     ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
45     FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
46                                            descriptors (signal handling) */
47     MMAP_PAGE_ZERO =    0x0100000,
48     ADDR_COMPAT_LAYOUT = 0x0200000,
49     READ_IMPLIES_EXEC = 0x0400000,
50     ADDR_LIMIT_32BIT =  0x0800000,
51     SHORT_INODE =       0x1000000,
52     WHOLE_SECONDS =     0x2000000,
53     STICKY_TIMEOUTS =   0x4000000,
54     ADDR_LIMIT_3GB =    0x8000000,
55 };
56 
57 /*
58  * Personality types.
59  *
60  * These go in the low byte.  Avoid using the top bit, it will
61  * conflict with error returns.
62  */
63 enum {
64     PER_LINUX =         0x0000,
65     PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
66     PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
67     PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
68     PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
69     PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
70     PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
71     PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
72     PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
73     PER_BSD =           0x0006,
74     PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
75     PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
76     PER_LINUX32 =       0x0008,
77     PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
78     PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
79     PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
80     PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
81     PER_RISCOS =        0x000c,
82     PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
83     PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
84     PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
85     PER_HPUX =          0x0010,
86     PER_MASK =          0x00ff,
87 };
88 
89 /*
90  * Return the base personality without flags.
91  */
92 #define personality(pers)       (pers & PER_MASK)
93 
94 int info_is_fdpic(struct image_info *info)
95 {
96     return info->personality == PER_LINUX_FDPIC;
97 }
98 
99 /* this flag is uneffective under linux too, should be deleted */
100 #ifndef MAP_DENYWRITE
101 #define MAP_DENYWRITE 0
102 #endif
103 
104 /* should probably go in elf.h */
105 #ifndef ELIBBAD
106 #define ELIBBAD 80
107 #endif
108 
109 #if TARGET_BIG_ENDIAN
110 #define ELF_DATA        ELFDATA2MSB
111 #else
112 #define ELF_DATA        ELFDATA2LSB
113 #endif
114 
115 #ifdef TARGET_ABI_MIPSN32
116 typedef abi_ullong      target_elf_greg_t;
117 #define tswapreg(ptr)   tswap64(ptr)
118 #else
119 typedef abi_ulong       target_elf_greg_t;
120 #define tswapreg(ptr)   tswapal(ptr)
121 #endif
122 
123 #ifdef USE_UID16
124 typedef abi_ushort      target_uid_t;
125 typedef abi_ushort      target_gid_t;
126 #else
127 typedef abi_uint        target_uid_t;
128 typedef abi_uint        target_gid_t;
129 #endif
130 typedef abi_int         target_pid_t;
131 
132 #ifdef TARGET_I386
133 
134 #define ELF_HWCAP get_elf_hwcap()
135 
136 static uint32_t get_elf_hwcap(void)
137 {
138     X86CPU *cpu = X86_CPU(thread_cpu);
139 
140     return cpu->env.features[FEAT_1_EDX];
141 }
142 
143 #ifdef TARGET_X86_64
144 #define ELF_START_MMAP 0x2aaaaab000ULL
145 
146 #define ELF_CLASS      ELFCLASS64
147 #define ELF_ARCH       EM_X86_64
148 
149 #define ELF_PLATFORM   "x86_64"
150 
151 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
152 {
153     regs->rax = 0;
154     regs->rsp = infop->start_stack;
155     regs->rip = infop->entry;
156 }
157 
158 #define ELF_NREG    27
159 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
160 
161 /*
162  * Note that ELF_NREG should be 29 as there should be place for
163  * TRAPNO and ERR "registers" as well but linux doesn't dump
164  * those.
165  *
166  * See linux kernel: arch/x86/include/asm/elf.h
167  */
168 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
169 {
170     (*regs)[0] = tswapreg(env->regs[15]);
171     (*regs)[1] = tswapreg(env->regs[14]);
172     (*regs)[2] = tswapreg(env->regs[13]);
173     (*regs)[3] = tswapreg(env->regs[12]);
174     (*regs)[4] = tswapreg(env->regs[R_EBP]);
175     (*regs)[5] = tswapreg(env->regs[R_EBX]);
176     (*regs)[6] = tswapreg(env->regs[11]);
177     (*regs)[7] = tswapreg(env->regs[10]);
178     (*regs)[8] = tswapreg(env->regs[9]);
179     (*regs)[9] = tswapreg(env->regs[8]);
180     (*regs)[10] = tswapreg(env->regs[R_EAX]);
181     (*regs)[11] = tswapreg(env->regs[R_ECX]);
182     (*regs)[12] = tswapreg(env->regs[R_EDX]);
183     (*regs)[13] = tswapreg(env->regs[R_ESI]);
184     (*regs)[14] = tswapreg(env->regs[R_EDI]);
185     (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */
186     (*regs)[16] = tswapreg(env->eip);
187     (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
188     (*regs)[18] = tswapreg(env->eflags);
189     (*regs)[19] = tswapreg(env->regs[R_ESP]);
190     (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff);
191     (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff);
192     (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff);
193     (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff);
194     (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff);
195     (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff);
196     (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
197 }
198 
199 #if ULONG_MAX > UINT32_MAX
200 #define INIT_GUEST_COMMPAGE
201 static bool init_guest_commpage(void)
202 {
203     /*
204      * The vsyscall page is at a high negative address aka kernel space,
205      * which means that we cannot actually allocate it with target_mmap.
206      * We still should be able to use page_set_flags, unless the user
207      * has specified -R reserved_va, which would trigger an assert().
208      */
209     if (reserved_va != 0 &&
210         TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
211         error_report("Cannot allocate vsyscall page");
212         exit(EXIT_FAILURE);
213     }
214     page_set_flags(TARGET_VSYSCALL_PAGE,
215                    TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
216                    PAGE_EXEC | PAGE_VALID);
217     return true;
218 }
219 #endif
220 #else
221 
222 #define ELF_START_MMAP 0x80000000
223 
224 /*
225  * This is used to ensure we don't load something for the wrong architecture.
226  */
227 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
228 
229 /*
230  * These are used to set parameters in the core dumps.
231  */
232 #define ELF_CLASS       ELFCLASS32
233 #define ELF_ARCH        EM_386
234 
235 #define ELF_PLATFORM get_elf_platform()
236 #define EXSTACK_DEFAULT true
237 
238 static const char *get_elf_platform(void)
239 {
240     static char elf_platform[] = "i386";
241     int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
242     if (family > 6) {
243         family = 6;
244     }
245     if (family >= 3) {
246         elf_platform[1] = '0' + family;
247     }
248     return elf_platform;
249 }
250 
251 static inline void init_thread(struct target_pt_regs *regs,
252                                struct image_info *infop)
253 {
254     regs->esp = infop->start_stack;
255     regs->eip = infop->entry;
256 
257     /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
258        starts %edx contains a pointer to a function which might be
259        registered using `atexit'.  This provides a mean for the
260        dynamic linker to call DT_FINI functions for shared libraries
261        that have been loaded before the code runs.
262 
263        A value of 0 tells we have no such handler.  */
264     regs->edx = 0;
265 }
266 
267 #define ELF_NREG    17
268 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
269 
270 /*
271  * Note that ELF_NREG should be 19 as there should be place for
272  * TRAPNO and ERR "registers" as well but linux doesn't dump
273  * those.
274  *
275  * See linux kernel: arch/x86/include/asm/elf.h
276  */
277 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
278 {
279     (*regs)[0] = tswapreg(env->regs[R_EBX]);
280     (*regs)[1] = tswapreg(env->regs[R_ECX]);
281     (*regs)[2] = tswapreg(env->regs[R_EDX]);
282     (*regs)[3] = tswapreg(env->regs[R_ESI]);
283     (*regs)[4] = tswapreg(env->regs[R_EDI]);
284     (*regs)[5] = tswapreg(env->regs[R_EBP]);
285     (*regs)[6] = tswapreg(env->regs[R_EAX]);
286     (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff);
287     (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
288     (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
289     (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
290     (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */
291     (*regs)[12] = tswapreg(env->eip);
292     (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
293     (*regs)[14] = tswapreg(env->eflags);
294     (*regs)[15] = tswapreg(env->regs[R_ESP]);
295     (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff);
296 }
297 #endif
298 
299 #define USE_ELF_CORE_DUMP
300 #define ELF_EXEC_PAGESIZE       4096
301 
302 #endif
303 
304 #ifdef TARGET_ARM
305 
306 #ifndef TARGET_AARCH64
307 /* 32 bit ARM definitions */
308 
309 #define ELF_START_MMAP 0x80000000
310 
311 #define ELF_ARCH        EM_ARM
312 #define ELF_CLASS       ELFCLASS32
313 #define EXSTACK_DEFAULT true
314 
315 static inline void init_thread(struct target_pt_regs *regs,
316                                struct image_info *infop)
317 {
318     abi_long stack = infop->start_stack;
319     memset(regs, 0, sizeof(*regs));
320 
321     regs->uregs[16] = ARM_CPU_MODE_USR;
322     if (infop->entry & 1) {
323         regs->uregs[16] |= CPSR_T;
324     }
325     regs->uregs[15] = infop->entry & 0xfffffffe;
326     regs->uregs[13] = infop->start_stack;
327     /* FIXME - what to for failure of get_user()? */
328     get_user_ual(regs->uregs[2], stack + 8); /* envp */
329     get_user_ual(regs->uregs[1], stack + 4); /* envp */
330     /* XXX: it seems that r0 is zeroed after ! */
331     regs->uregs[0] = 0;
332     /* For uClinux PIC binaries.  */
333     /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
334     regs->uregs[10] = infop->start_data;
335 
336     /* Support ARM FDPIC.  */
337     if (info_is_fdpic(infop)) {
338         /* As described in the ABI document, r7 points to the loadmap info
339          * prepared by the kernel. If an interpreter is needed, r8 points
340          * to the interpreter loadmap and r9 points to the interpreter
341          * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and
342          * r9 points to the main program PT_DYNAMIC info.
343          */
344         regs->uregs[7] = infop->loadmap_addr;
345         if (infop->interpreter_loadmap_addr) {
346             /* Executable is dynamically loaded.  */
347             regs->uregs[8] = infop->interpreter_loadmap_addr;
348             regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
349         } else {
350             regs->uregs[8] = 0;
351             regs->uregs[9] = infop->pt_dynamic_addr;
352         }
353     }
354 }
355 
356 #define ELF_NREG    18
357 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
358 
359 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
360 {
361     (*regs)[0] = tswapreg(env->regs[0]);
362     (*regs)[1] = tswapreg(env->regs[1]);
363     (*regs)[2] = tswapreg(env->regs[2]);
364     (*regs)[3] = tswapreg(env->regs[3]);
365     (*regs)[4] = tswapreg(env->regs[4]);
366     (*regs)[5] = tswapreg(env->regs[5]);
367     (*regs)[6] = tswapreg(env->regs[6]);
368     (*regs)[7] = tswapreg(env->regs[7]);
369     (*regs)[8] = tswapreg(env->regs[8]);
370     (*regs)[9] = tswapreg(env->regs[9]);
371     (*regs)[10] = tswapreg(env->regs[10]);
372     (*regs)[11] = tswapreg(env->regs[11]);
373     (*regs)[12] = tswapreg(env->regs[12]);
374     (*regs)[13] = tswapreg(env->regs[13]);
375     (*regs)[14] = tswapreg(env->regs[14]);
376     (*regs)[15] = tswapreg(env->regs[15]);
377 
378     (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
379     (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
380 }
381 
382 #define USE_ELF_CORE_DUMP
383 #define ELF_EXEC_PAGESIZE       4096
384 
385 enum
386 {
387     ARM_HWCAP_ARM_SWP       = 1 << 0,
388     ARM_HWCAP_ARM_HALF      = 1 << 1,
389     ARM_HWCAP_ARM_THUMB     = 1 << 2,
390     ARM_HWCAP_ARM_26BIT     = 1 << 3,
391     ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
392     ARM_HWCAP_ARM_FPA       = 1 << 5,
393     ARM_HWCAP_ARM_VFP       = 1 << 6,
394     ARM_HWCAP_ARM_EDSP      = 1 << 7,
395     ARM_HWCAP_ARM_JAVA      = 1 << 8,
396     ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
397     ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
398     ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
399     ARM_HWCAP_ARM_NEON      = 1 << 12,
400     ARM_HWCAP_ARM_VFPv3     = 1 << 13,
401     ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
402     ARM_HWCAP_ARM_TLS       = 1 << 15,
403     ARM_HWCAP_ARM_VFPv4     = 1 << 16,
404     ARM_HWCAP_ARM_IDIVA     = 1 << 17,
405     ARM_HWCAP_ARM_IDIVT     = 1 << 18,
406     ARM_HWCAP_ARM_VFPD32    = 1 << 19,
407     ARM_HWCAP_ARM_LPAE      = 1 << 20,
408     ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
409 };
410 
411 enum {
412     ARM_HWCAP2_ARM_AES      = 1 << 0,
413     ARM_HWCAP2_ARM_PMULL    = 1 << 1,
414     ARM_HWCAP2_ARM_SHA1     = 1 << 2,
415     ARM_HWCAP2_ARM_SHA2     = 1 << 3,
416     ARM_HWCAP2_ARM_CRC32    = 1 << 4,
417 };
418 
419 /* The commpage only exists for 32 bit kernels */
420 
421 #define HI_COMMPAGE (intptr_t)0xffff0f00u
422 
423 static bool init_guest_commpage(void)
424 {
425     abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
426     void *want = g2h_untagged(commpage);
427     void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
428                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
429 
430     if (addr == MAP_FAILED) {
431         perror("Allocating guest commpage");
432         exit(EXIT_FAILURE);
433     }
434     if (addr != want) {
435         return false;
436     }
437 
438     /* Set kernel helper versions; rest of page is 0.  */
439     __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
440 
441     if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
442         perror("Protecting guest commpage");
443         exit(EXIT_FAILURE);
444     }
445 
446     page_set_flags(commpage, commpage + qemu_host_page_size,
447                    PAGE_READ | PAGE_EXEC | PAGE_VALID);
448     return true;
449 }
450 
451 #define ELF_HWCAP get_elf_hwcap()
452 #define ELF_HWCAP2 get_elf_hwcap2()
453 
454 static uint32_t get_elf_hwcap(void)
455 {
456     ARMCPU *cpu = ARM_CPU(thread_cpu);
457     uint32_t hwcaps = 0;
458 
459     hwcaps |= ARM_HWCAP_ARM_SWP;
460     hwcaps |= ARM_HWCAP_ARM_HALF;
461     hwcaps |= ARM_HWCAP_ARM_THUMB;
462     hwcaps |= ARM_HWCAP_ARM_FAST_MULT;
463 
464     /* probe for the extra features */
465 #define GET_FEATURE(feat, hwcap) \
466     do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
467 
468 #define GET_FEATURE_ID(feat, hwcap) \
469     do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
470 
471     /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
472     GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
473     GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
474     GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
475     GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
476     GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
477     GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
478     GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
479     GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
480     GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);
481 
482     if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
483         cpu_isar_feature(aa32_fpdp_v3, cpu)) {
484         hwcaps |= ARM_HWCAP_ARM_VFPv3;
485         if (cpu_isar_feature(aa32_simd_r32, cpu)) {
486             hwcaps |= ARM_HWCAP_ARM_VFPD32;
487         } else {
488             hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
489         }
490     }
491     GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
492 
493     return hwcaps;
494 }
495 
496 static uint32_t get_elf_hwcap2(void)
497 {
498     ARMCPU *cpu = ARM_CPU(thread_cpu);
499     uint32_t hwcaps = 0;
500 
501     GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
502     GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
503     GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
504     GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
505     GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
506     return hwcaps;
507 }
508 
509 #undef GET_FEATURE
510 #undef GET_FEATURE_ID
511 
512 #define ELF_PLATFORM get_elf_platform()
513 
514 static const char *get_elf_platform(void)
515 {
516     CPUARMState *env = thread_cpu->env_ptr;
517 
518 #if TARGET_BIG_ENDIAN
519 # define END  "b"
520 #else
521 # define END  "l"
522 #endif
523 
524     if (arm_feature(env, ARM_FEATURE_V8)) {
525         return "v8" END;
526     } else if (arm_feature(env, ARM_FEATURE_V7)) {
527         if (arm_feature(env, ARM_FEATURE_M)) {
528             return "v7m" END;
529         } else {
530             return "v7" END;
531         }
532     } else if (arm_feature(env, ARM_FEATURE_V6)) {
533         return "v6" END;
534     } else if (arm_feature(env, ARM_FEATURE_V5)) {
535         return "v5" END;
536     } else {
537         return "v4" END;
538     }
539 
540 #undef END
541 }
542 
543 #else
544 /* 64 bit ARM definitions */
545 #define ELF_START_MMAP 0x80000000
546 
547 #define ELF_ARCH        EM_AARCH64
548 #define ELF_CLASS       ELFCLASS64
549 #if TARGET_BIG_ENDIAN
550 # define ELF_PLATFORM    "aarch64_be"
551 #else
552 # define ELF_PLATFORM    "aarch64"
553 #endif
554 
555 static inline void init_thread(struct target_pt_regs *regs,
556                                struct image_info *infop)
557 {
558     abi_long stack = infop->start_stack;
559     memset(regs, 0, sizeof(*regs));
560 
561     regs->pc = infop->entry & ~0x3ULL;
562     regs->sp = stack;
563 }
564 
565 #define ELF_NREG    34
566 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
567 
568 static void elf_core_copy_regs(target_elf_gregset_t *regs,
569                                const CPUARMState *env)
570 {
571     int i;
572 
573     for (i = 0; i < 32; i++) {
574         (*regs)[i] = tswapreg(env->xregs[i]);
575     }
576     (*regs)[32] = tswapreg(env->pc);
577     (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
578 }
579 
580 #define USE_ELF_CORE_DUMP
581 #define ELF_EXEC_PAGESIZE       4096
582 
583 enum {
584     ARM_HWCAP_A64_FP            = 1 << 0,
585     ARM_HWCAP_A64_ASIMD         = 1 << 1,
586     ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
587     ARM_HWCAP_A64_AES           = 1 << 3,
588     ARM_HWCAP_A64_PMULL         = 1 << 4,
589     ARM_HWCAP_A64_SHA1          = 1 << 5,
590     ARM_HWCAP_A64_SHA2          = 1 << 6,
591     ARM_HWCAP_A64_CRC32         = 1 << 7,
592     ARM_HWCAP_A64_ATOMICS       = 1 << 8,
593     ARM_HWCAP_A64_FPHP          = 1 << 9,
594     ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
595     ARM_HWCAP_A64_CPUID         = 1 << 11,
596     ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
597     ARM_HWCAP_A64_JSCVT         = 1 << 13,
598     ARM_HWCAP_A64_FCMA          = 1 << 14,
599     ARM_HWCAP_A64_LRCPC         = 1 << 15,
600     ARM_HWCAP_A64_DCPOP         = 1 << 16,
601     ARM_HWCAP_A64_SHA3          = 1 << 17,
602     ARM_HWCAP_A64_SM3           = 1 << 18,
603     ARM_HWCAP_A64_SM4           = 1 << 19,
604     ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
605     ARM_HWCAP_A64_SHA512        = 1 << 21,
606     ARM_HWCAP_A64_SVE           = 1 << 22,
607     ARM_HWCAP_A64_ASIMDFHM      = 1 << 23,
608     ARM_HWCAP_A64_DIT           = 1 << 24,
609     ARM_HWCAP_A64_USCAT         = 1 << 25,
610     ARM_HWCAP_A64_ILRCPC        = 1 << 26,
611     ARM_HWCAP_A64_FLAGM         = 1 << 27,
612     ARM_HWCAP_A64_SSBS          = 1 << 28,
613     ARM_HWCAP_A64_SB            = 1 << 29,
614     ARM_HWCAP_A64_PACA          = 1 << 30,
615     ARM_HWCAP_A64_PACG          = 1UL << 31,
616 
617     ARM_HWCAP2_A64_DCPODP       = 1 << 0,
618     ARM_HWCAP2_A64_SVE2         = 1 << 1,
619     ARM_HWCAP2_A64_SVEAES       = 1 << 2,
620     ARM_HWCAP2_A64_SVEPMULL     = 1 << 3,
621     ARM_HWCAP2_A64_SVEBITPERM   = 1 << 4,
622     ARM_HWCAP2_A64_SVESHA3      = 1 << 5,
623     ARM_HWCAP2_A64_SVESM4       = 1 << 6,
624     ARM_HWCAP2_A64_FLAGM2       = 1 << 7,
625     ARM_HWCAP2_A64_FRINT        = 1 << 8,
626     ARM_HWCAP2_A64_SVEI8MM      = 1 << 9,
627     ARM_HWCAP2_A64_SVEF32MM     = 1 << 10,
628     ARM_HWCAP2_A64_SVEF64MM     = 1 << 11,
629     ARM_HWCAP2_A64_SVEBF16      = 1 << 12,
630     ARM_HWCAP2_A64_I8MM         = 1 << 13,
631     ARM_HWCAP2_A64_BF16         = 1 << 14,
632     ARM_HWCAP2_A64_DGH          = 1 << 15,
633     ARM_HWCAP2_A64_RNG          = 1 << 16,
634     ARM_HWCAP2_A64_BTI          = 1 << 17,
635     ARM_HWCAP2_A64_MTE          = 1 << 18,
636     ARM_HWCAP2_A64_ECV          = 1 << 19,
637     ARM_HWCAP2_A64_AFP          = 1 << 20,
638     ARM_HWCAP2_A64_RPRES        = 1 << 21,
639     ARM_HWCAP2_A64_MTE3         = 1 << 22,
640     ARM_HWCAP2_A64_SME          = 1 << 23,
641     ARM_HWCAP2_A64_SME_I16I64   = 1 << 24,
642     ARM_HWCAP2_A64_SME_F64F64   = 1 << 25,
643     ARM_HWCAP2_A64_SME_I8I32    = 1 << 26,
644     ARM_HWCAP2_A64_SME_F16F32   = 1 << 27,
645     ARM_HWCAP2_A64_SME_B16F32   = 1 << 28,
646     ARM_HWCAP2_A64_SME_F32F32   = 1 << 29,
647     ARM_HWCAP2_A64_SME_FA64     = 1 << 30,
648 };
649 
650 #define ELF_HWCAP   get_elf_hwcap()
651 #define ELF_HWCAP2  get_elf_hwcap2()
652 
653 #define GET_FEATURE_ID(feat, hwcap) \
654     do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
655 
656 static uint32_t get_elf_hwcap(void)
657 {
658     ARMCPU *cpu = ARM_CPU(thread_cpu);
659     uint32_t hwcaps = 0;
660 
661     hwcaps |= ARM_HWCAP_A64_FP;
662     hwcaps |= ARM_HWCAP_A64_ASIMD;
663     hwcaps |= ARM_HWCAP_A64_CPUID;
664 
665     /* probe for the extra features */
666 
667     GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
668     GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
669     GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
670     GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
671     GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
672     GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
673     GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
674     GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
675     GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
676     GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
677     GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
678     GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
679     GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
680     GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
681     GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
682     GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
683     GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
684     GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
685     GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
686     GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
687     GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
688     GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
689     GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);
690 
691     return hwcaps;
692 }
693 
694 static uint32_t get_elf_hwcap2(void)
695 {
696     ARMCPU *cpu = ARM_CPU(thread_cpu);
697     uint32_t hwcaps = 0;
698 
699     GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
700     GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
701     GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
702     GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
703     GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
704     GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
705     GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
706     GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
707     GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
708     GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
709     GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
710     GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
711     GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
712     GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
713     GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
714     GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
715     GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
716     GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
717     GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
718                               ARM_HWCAP2_A64_SME_F32F32 |
719                               ARM_HWCAP2_A64_SME_B16F32 |
720                               ARM_HWCAP2_A64_SME_F16F32 |
721                               ARM_HWCAP2_A64_SME_I8I32));
722     GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
723     GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
724     GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
725 
726     return hwcaps;
727 }
728 
729 #undef GET_FEATURE_ID
730 
731 #endif /* not TARGET_AARCH64 */
732 #endif /* TARGET_ARM */
733 
734 #ifdef TARGET_SPARC
735 #ifdef TARGET_SPARC64
736 
737 #define ELF_START_MMAP 0x80000000
738 #define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
739                     | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
740 #ifndef TARGET_ABI32
741 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
742 #else
743 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
744 #endif
745 
746 #define ELF_CLASS   ELFCLASS64
747 #define ELF_ARCH    EM_SPARCV9
748 #else
749 #define ELF_START_MMAP 0x80000000
750 #define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
751                     | HWCAP_SPARC_MULDIV)
752 #define ELF_CLASS   ELFCLASS32
753 #define ELF_ARCH    EM_SPARC
754 #endif /* TARGET_SPARC64 */
755 
756 static inline void init_thread(struct target_pt_regs *regs,
757                                struct image_info *infop)
758 {
759     /* Note that target_cpu_copy_regs does not read psr/tstate. */
760     regs->pc = infop->entry;
761     regs->npc = regs->pc + 4;
762     regs->y = 0;
763     regs->u_regs[14] = (infop->start_stack - 16 * sizeof(abi_ulong)
764                         - TARGET_STACK_BIAS);
765 }
766 #endif /* TARGET_SPARC */
767 
768 #ifdef TARGET_PPC
769 
770 #define ELF_MACHINE    PPC_ELF_MACHINE
771 #define ELF_START_MMAP 0x80000000
772 
773 #if defined(TARGET_PPC64)
774 
775 #define elf_check_arch(x) ( (x) == EM_PPC64 )
776 
777 #define ELF_CLASS       ELFCLASS64
778 
779 #else
780 
781 #define ELF_CLASS       ELFCLASS32
782 #define EXSTACK_DEFAULT true
783 
784 #endif
785 
786 #define ELF_ARCH        EM_PPC
787 
788 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
789    See arch/powerpc/include/asm/cputable.h.  */
790 enum {
791     QEMU_PPC_FEATURE_32 = 0x80000000,
792     QEMU_PPC_FEATURE_64 = 0x40000000,
793     QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
794     QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
795     QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
796     QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
797     QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
798     QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
799     QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
800     QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
801     QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
802     QEMU_PPC_FEATURE_NO_TB = 0x00100000,
803     QEMU_PPC_FEATURE_POWER4 = 0x00080000,
804     QEMU_PPC_FEATURE_POWER5 = 0x00040000,
805     QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
806     QEMU_PPC_FEATURE_CELL = 0x00010000,
807     QEMU_PPC_FEATURE_BOOKE = 0x00008000,
808     QEMU_PPC_FEATURE_SMT = 0x00004000,
809     QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
810     QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
811     QEMU_PPC_FEATURE_PA6T = 0x00000800,
812     QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
813     QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
814     QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
815     QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
816     QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
817 
818     QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
819     QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
820 
821     /* Feature definitions in AT_HWCAP2.  */
822     QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
823     QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
824     QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
825     QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
826     QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
827     QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
828     QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
829     QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
830     QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
831     QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
832     QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
833     QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
834     QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
835     QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */
836     QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */
837 };
838 
839 #define ELF_HWCAP get_elf_hwcap()
840 
841 static uint32_t get_elf_hwcap(void)
842 {
843     PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
844     uint32_t features = 0;
845 
846     /* We don't have to be terribly complete here; the high points are
847        Altivec/FP/SPE support.  Anything else is just a bonus.  */
848 #define GET_FEATURE(flag, feature)                                      \
849     do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
850 #define GET_FEATURE2(flags, feature) \
851     do { \
852         if ((cpu->env.insns_flags2 & flags) == flags) { \
853             features |= feature; \
854         } \
855     } while (0)
856     GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
857     GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
858     GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
859     GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
860     GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
861     GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
862     GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
863     GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
864     GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
865     GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
866     GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
867                   PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
868                   QEMU_PPC_FEATURE_ARCH_2_06);
869 #undef GET_FEATURE
870 #undef GET_FEATURE2
871 
872     return features;
873 }
874 
875 #define ELF_HWCAP2 get_elf_hwcap2()
876 
877 static uint32_t get_elf_hwcap2(void)
878 {
879     PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
880     uint32_t features = 0;
881 
882 #define GET_FEATURE(flag, feature)                                      \
883     do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
884 #define GET_FEATURE2(flag, feature)                                      \
885     do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)
886 
887     GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
888     GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
889     GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
890                   PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
891                   QEMU_PPC_FEATURE2_VEC_CRYPTO);
892     GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
893                  QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128);
894     GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 |
895                  QEMU_PPC_FEATURE2_MMA);
896 
897 #undef GET_FEATURE
898 #undef GET_FEATURE2
899 
900     return features;
901 }
902 
903 /*
904  * The requirements here are:
905  * - keep the final alignment of sp (sp & 0xf)
906  * - make sure the 32-bit value at the first 16 byte aligned position of
907  *   AUXV is greater than 16 for glibc compatibility.
908  *   AT_IGNOREPPC is used for that.
909  * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
910  *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
911  */
912 #define DLINFO_ARCH_ITEMS       5
913 #define ARCH_DLINFO                                     \
914     do {                                                \
915         PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);              \
916         /*                                              \
917          * Handle glibc compatibility: these magic entries must \
918          * be at the lowest addresses in the final auxv.        \
919          */                                             \
920         NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
921         NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
922         NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \
923         NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \
924         NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
925     } while (0)
926 
927 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
928 {
929     _regs->gpr[1] = infop->start_stack;
930 #if defined(TARGET_PPC64)
931     if (get_ppc64_abi(infop) < 2) {
932         uint64_t val;
933         get_user_u64(val, infop->entry + 8);
934         _regs->gpr[2] = val + infop->load_bias;
935         get_user_u64(val, infop->entry);
936         infop->entry = val + infop->load_bias;
937     } else {
938         _regs->gpr[12] = infop->entry;  /* r12 set to global entry address */
939     }
940 #endif
941     _regs->nip = infop->entry;
942 }
943 
944 /* See linux kernel: arch/powerpc/include/asm/elf.h.  */
945 #define ELF_NREG 48
946 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
947 
948 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
949 {
950     int i;
951     target_ulong ccr = 0;
952 
953     for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
954         (*regs)[i] = tswapreg(env->gpr[i]);
955     }
956 
957     (*regs)[32] = tswapreg(env->nip);
958     (*regs)[33] = tswapreg(env->msr);
959     (*regs)[35] = tswapreg(env->ctr);
960     (*regs)[36] = tswapreg(env->lr);
961     (*regs)[37] = tswapreg(cpu_read_xer(env));
962 
963     for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
964         ccr |= env->crf[i] << (32 - ((i + 1) * 4));
965     }
966     (*regs)[38] = tswapreg(ccr);
967 }
968 
969 #define USE_ELF_CORE_DUMP
970 #define ELF_EXEC_PAGESIZE       4096
971 
972 #endif
973 
974 #ifdef TARGET_LOONGARCH64
975 
976 #define ELF_START_MMAP 0x80000000
977 
978 #define ELF_CLASS   ELFCLASS64
979 #define ELF_ARCH    EM_LOONGARCH
980 #define EXSTACK_DEFAULT true
981 
982 #define elf_check_arch(x) ((x) == EM_LOONGARCH)
983 
984 static inline void init_thread(struct target_pt_regs *regs,
985                                struct image_info *infop)
986 {
987     /*Set crmd PG,DA = 1,0 */
988     regs->csr.crmd = 2 << 3;
989     regs->csr.era = infop->entry;
990     regs->regs[3] = infop->start_stack;
991 }
992 
993 /* See linux kernel: arch/loongarch/include/asm/elf.h */
994 #define ELF_NREG 45
995 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
996 
997 enum {
998     TARGET_EF_R0 = 0,
999     TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33,
1000     TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34,
1001 };
1002 
1003 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1004                                const CPULoongArchState *env)
1005 {
1006     int i;
1007 
1008     (*regs)[TARGET_EF_R0] = 0;
1009 
1010     for (i = 1; i < ARRAY_SIZE(env->gpr); i++) {
1011         (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]);
1012     }
1013 
1014     (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc);
1015     (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV);
1016 }
1017 
1018 #define USE_ELF_CORE_DUMP
1019 #define ELF_EXEC_PAGESIZE        4096
1020 
1021 #define ELF_HWCAP get_elf_hwcap()
1022 
1023 /* See arch/loongarch/include/uapi/asm/hwcap.h */
1024 enum {
1025     HWCAP_LOONGARCH_CPUCFG   = (1 << 0),
1026     HWCAP_LOONGARCH_LAM      = (1 << 1),
1027     HWCAP_LOONGARCH_UAL      = (1 << 2),
1028     HWCAP_LOONGARCH_FPU      = (1 << 3),
1029     HWCAP_LOONGARCH_LSX      = (1 << 4),
1030     HWCAP_LOONGARCH_LASX     = (1 << 5),
1031     HWCAP_LOONGARCH_CRC32    = (1 << 6),
1032     HWCAP_LOONGARCH_COMPLEX  = (1 << 7),
1033     HWCAP_LOONGARCH_CRYPTO   = (1 << 8),
1034     HWCAP_LOONGARCH_LVZ      = (1 << 9),
1035     HWCAP_LOONGARCH_LBT_X86  = (1 << 10),
1036     HWCAP_LOONGARCH_LBT_ARM  = (1 << 11),
1037     HWCAP_LOONGARCH_LBT_MIPS = (1 << 12),
1038 };
1039 
1040 static uint32_t get_elf_hwcap(void)
1041 {
1042     LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu);
1043     uint32_t hwcaps = 0;
1044 
1045     hwcaps |= HWCAP_LOONGARCH_CRC32;
1046 
1047     if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) {
1048         hwcaps |= HWCAP_LOONGARCH_UAL;
1049     }
1050 
1051     if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) {
1052         hwcaps |= HWCAP_LOONGARCH_FPU;
1053     }
1054 
1055     if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) {
1056         hwcaps |= HWCAP_LOONGARCH_LAM;
1057     }
1058 
1059     return hwcaps;
1060 }
1061 
1062 #define ELF_PLATFORM "loongarch"
1063 
1064 #endif /* TARGET_LOONGARCH64 */
1065 
1066 #ifdef TARGET_MIPS
1067 
1068 #define ELF_START_MMAP 0x80000000
1069 
1070 #ifdef TARGET_MIPS64
1071 #define ELF_CLASS   ELFCLASS64
1072 #else
1073 #define ELF_CLASS   ELFCLASS32
1074 #endif
1075 #define ELF_ARCH    EM_MIPS
1076 #define EXSTACK_DEFAULT true
1077 
1078 #ifdef TARGET_ABI_MIPSN32
1079 #define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
1080 #else
1081 #define elf_check_abi(x) (!((x) & EF_MIPS_ABI2))
1082 #endif
1083 
1084 #define ELF_BASE_PLATFORM get_elf_base_platform()
1085 
1086 #define MATCH_PLATFORM_INSN(_flags, _base_platform)      \
1087     do { if ((cpu->env.insn_flags & (_flags)) == _flags) \
1088     { return _base_platform; } } while (0)
1089 
1090 static const char *get_elf_base_platform(void)
1091 {
1092     MIPSCPU *cpu = MIPS_CPU(thread_cpu);
1093 
1094     /* 64 bit ISAs goes first */
1095     MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6");
1096     MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5");
1097     MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2");
1098     MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64");
1099     MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5");
1100     MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4");
1101     MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3");
1102 
1103     /* 32 bit ISAs */
1104     MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6");
1105     MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5");
1106     MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2");
1107     MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32");
1108     MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2");
1109 
1110     /* Fallback */
1111     return "mips";
1112 }
1113 #undef MATCH_PLATFORM_INSN
1114 
1115 static inline void init_thread(struct target_pt_regs *regs,
1116                                struct image_info *infop)
1117 {
1118     regs->cp0_status = 2 << CP0St_KSU;
1119     regs->cp0_epc = infop->entry;
1120     regs->regs[29] = infop->start_stack;
1121 }
1122 
1123 /* See linux kernel: arch/mips/include/asm/elf.h.  */
1124 #define ELF_NREG 45
1125 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1126 
1127 /* See linux kernel: arch/mips/include/asm/reg.h.  */
1128 enum {
1129 #ifdef TARGET_MIPS64
1130     TARGET_EF_R0 = 0,
1131 #else
1132     TARGET_EF_R0 = 6,
1133 #endif
1134     TARGET_EF_R26 = TARGET_EF_R0 + 26,
1135     TARGET_EF_R27 = TARGET_EF_R0 + 27,
1136     TARGET_EF_LO = TARGET_EF_R0 + 32,
1137     TARGET_EF_HI = TARGET_EF_R0 + 33,
1138     TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
1139     TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
1140     TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
1141     TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
1142 };
1143 
1144 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
1145 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
1146 {
1147     int i;
1148 
1149     for (i = 0; i < TARGET_EF_R0; i++) {
1150         (*regs)[i] = 0;
1151     }
1152     (*regs)[TARGET_EF_R0] = 0;
1153 
1154     for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
1155         (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
1156     }
1157 
1158     (*regs)[TARGET_EF_R26] = 0;
1159     (*regs)[TARGET_EF_R27] = 0;
1160     (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
1161     (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
1162     (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
1163     (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
1164     (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
1165     (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
1166 }
1167 
1168 #define USE_ELF_CORE_DUMP
1169 #define ELF_EXEC_PAGESIZE        4096
1170 
1171 /* See arch/mips/include/uapi/asm/hwcap.h.  */
1172 enum {
1173     HWCAP_MIPS_R6           = (1 << 0),
1174     HWCAP_MIPS_MSA          = (1 << 1),
1175     HWCAP_MIPS_CRC32        = (1 << 2),
1176     HWCAP_MIPS_MIPS16       = (1 << 3),
1177     HWCAP_MIPS_MDMX         = (1 << 4),
1178     HWCAP_MIPS_MIPS3D       = (1 << 5),
1179     HWCAP_MIPS_SMARTMIPS    = (1 << 6),
1180     HWCAP_MIPS_DSP          = (1 << 7),
1181     HWCAP_MIPS_DSP2         = (1 << 8),
1182     HWCAP_MIPS_DSP3         = (1 << 9),
1183     HWCAP_MIPS_MIPS16E2     = (1 << 10),
1184     HWCAP_LOONGSON_MMI      = (1 << 11),
1185     HWCAP_LOONGSON_EXT      = (1 << 12),
1186     HWCAP_LOONGSON_EXT2     = (1 << 13),
1187     HWCAP_LOONGSON_CPUCFG   = (1 << 14),
1188 };
1189 
1190 #define ELF_HWCAP get_elf_hwcap()
1191 
1192 #define GET_FEATURE_INSN(_flag, _hwcap) \
1193     do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0)
1194 
1195 #define GET_FEATURE_REG_SET(_reg, _mask, _hwcap) \
1196     do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0)
1197 
1198 #define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap) \
1199     do { \
1200         if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) { \
1201             hwcaps |= _hwcap; \
1202         } \
1203     } while (0)
1204 
1205 static uint32_t get_elf_hwcap(void)
1206 {
1207     MIPSCPU *cpu = MIPS_CPU(thread_cpu);
1208     uint32_t hwcaps = 0;
1209 
1210     GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH,
1211                         2, HWCAP_MIPS_R6);
1212     GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA);
1213     GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI);
1214     GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT);
1215 
1216     return hwcaps;
1217 }
1218 
1219 #undef GET_FEATURE_REG_EQU
1220 #undef GET_FEATURE_REG_SET
1221 #undef GET_FEATURE_INSN
1222 
1223 #endif /* TARGET_MIPS */
1224 
1225 #ifdef TARGET_MICROBLAZE
1226 
1227 #define ELF_START_MMAP 0x80000000
1228 
1229 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
1230 
1231 #define ELF_CLASS   ELFCLASS32
1232 #define ELF_ARCH    EM_MICROBLAZE
1233 
1234 static inline void init_thread(struct target_pt_regs *regs,
1235                                struct image_info *infop)
1236 {
1237     regs->pc = infop->entry;
1238     regs->r1 = infop->start_stack;
1239 
1240 }
1241 
1242 #define ELF_EXEC_PAGESIZE        4096
1243 
1244 #define USE_ELF_CORE_DUMP
1245 #define ELF_NREG 38
1246 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1247 
1248 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
1249 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
1250 {
1251     int i, pos = 0;
1252 
1253     for (i = 0; i < 32; i++) {
1254         (*regs)[pos++] = tswapreg(env->regs[i]);
1255     }
1256 
1257     (*regs)[pos++] = tswapreg(env->pc);
1258     (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env));
1259     (*regs)[pos++] = 0;
1260     (*regs)[pos++] = tswapreg(env->ear);
1261     (*regs)[pos++] = 0;
1262     (*regs)[pos++] = tswapreg(env->esr);
1263 }
1264 
1265 #endif /* TARGET_MICROBLAZE */
1266 
1267 #ifdef TARGET_NIOS2
1268 
1269 #define ELF_START_MMAP 0x80000000
1270 
1271 #define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2)
1272 
1273 #define ELF_CLASS   ELFCLASS32
1274 #define ELF_ARCH    EM_ALTERA_NIOS2
1275 
1276 static void init_thread(struct target_pt_regs *regs, struct image_info *infop)
1277 {
1278     regs->ea = infop->entry;
1279     regs->sp = infop->start_stack;
1280 }
1281 
1282 #define LO_COMMPAGE  TARGET_PAGE_SIZE
1283 
1284 static bool init_guest_commpage(void)
1285 {
1286     static const uint8_t kuser_page[4 + 2 * 64] = {
1287         /* __kuser_helper_version */
1288         [0x00] = 0x02, 0x00, 0x00, 0x00,
1289 
1290         /* __kuser_cmpxchg */
1291         [0x04] = 0x3a, 0x6c, 0x3b, 0x00,  /* trap 16 */
1292                  0x3a, 0x28, 0x00, 0xf8,  /* ret */
1293 
1294         /* __kuser_sigtramp */
1295         [0x44] = 0xc4, 0x22, 0x80, 0x00,  /* movi r2, __NR_rt_sigreturn */
1296                  0x3a, 0x68, 0x3b, 0x00,  /* trap 0 */
1297     };
1298 
1299     void *want = g2h_untagged(LO_COMMPAGE & -qemu_host_page_size);
1300     void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
1301                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
1302 
1303     if (addr == MAP_FAILED) {
1304         perror("Allocating guest commpage");
1305         exit(EXIT_FAILURE);
1306     }
1307     if (addr != want) {
1308         return false;
1309     }
1310 
1311     memcpy(addr, kuser_page, sizeof(kuser_page));
1312 
1313     if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
1314         perror("Protecting guest commpage");
1315         exit(EXIT_FAILURE);
1316     }
1317 
1318     page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
1319                    PAGE_READ | PAGE_EXEC | PAGE_VALID);
1320     return true;
1321 }
1322 
1323 #define ELF_EXEC_PAGESIZE        4096
1324 
1325 #define USE_ELF_CORE_DUMP
1326 #define ELF_NREG 49
1327 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1328 
1329 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
1330 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1331                                const CPUNios2State *env)
1332 {
1333     int i;
1334 
1335     (*regs)[0] = -1;
1336     for (i = 1; i < 8; i++)    /* r0-r7 */
1337         (*regs)[i] = tswapreg(env->regs[i + 7]);
1338 
1339     for (i = 8; i < 16; i++)   /* r8-r15 */
1340         (*regs)[i] = tswapreg(env->regs[i - 8]);
1341 
1342     for (i = 16; i < 24; i++)  /* r16-r23 */
1343         (*regs)[i] = tswapreg(env->regs[i + 7]);
1344     (*regs)[24] = -1;    /* R_ET */
1345     (*regs)[25] = -1;    /* R_BT */
1346     (*regs)[26] = tswapreg(env->regs[R_GP]);
1347     (*regs)[27] = tswapreg(env->regs[R_SP]);
1348     (*regs)[28] = tswapreg(env->regs[R_FP]);
1349     (*regs)[29] = tswapreg(env->regs[R_EA]);
1350     (*regs)[30] = -1;    /* R_SSTATUS */
1351     (*regs)[31] = tswapreg(env->regs[R_RA]);
1352 
1353     (*regs)[32] = tswapreg(env->pc);
1354 
1355     (*regs)[33] = -1; /* R_STATUS */
1356     (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]);
1357 
1358     for (i = 35; i < 49; i++)    /* ... */
1359         (*regs)[i] = -1;
1360 }
1361 
1362 #endif /* TARGET_NIOS2 */
1363 
1364 #ifdef TARGET_OPENRISC
1365 
1366 #define ELF_START_MMAP 0x08000000
1367 
1368 #define ELF_ARCH EM_OPENRISC
1369 #define ELF_CLASS ELFCLASS32
1370 #define ELF_DATA  ELFDATA2MSB
1371 
1372 static inline void init_thread(struct target_pt_regs *regs,
1373                                struct image_info *infop)
1374 {
1375     regs->pc = infop->entry;
1376     regs->gpr[1] = infop->start_stack;
1377 }
1378 
1379 #define USE_ELF_CORE_DUMP
1380 #define ELF_EXEC_PAGESIZE 8192
1381 
1382 /* See linux kernel arch/openrisc/include/asm/elf.h.  */
1383 #define ELF_NREG 34 /* gprs and pc, sr */
1384 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1385 
1386 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1387                                const CPUOpenRISCState *env)
1388 {
1389     int i;
1390 
1391     for (i = 0; i < 32; i++) {
1392         (*regs)[i] = tswapreg(cpu_get_gpr(env, i));
1393     }
1394     (*regs)[32] = tswapreg(env->pc);
1395     (*regs)[33] = tswapreg(cpu_get_sr(env));
1396 }
1397 #define ELF_HWCAP 0
1398 #define ELF_PLATFORM NULL
1399 
1400 #endif /* TARGET_OPENRISC */
1401 
1402 #ifdef TARGET_SH4
1403 
1404 #define ELF_START_MMAP 0x80000000
1405 
1406 #define ELF_CLASS ELFCLASS32
1407 #define ELF_ARCH  EM_SH
1408 
1409 static inline void init_thread(struct target_pt_regs *regs,
1410                                struct image_info *infop)
1411 {
1412     /* Check other registers XXXXX */
1413     regs->pc = infop->entry;
1414     regs->regs[15] = infop->start_stack;
1415 }
1416 
1417 /* See linux kernel: arch/sh/include/asm/elf.h.  */
1418 #define ELF_NREG 23
1419 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1420 
1421 /* See linux kernel: arch/sh/include/asm/ptrace.h.  */
1422 enum {
1423     TARGET_REG_PC = 16,
1424     TARGET_REG_PR = 17,
1425     TARGET_REG_SR = 18,
1426     TARGET_REG_GBR = 19,
1427     TARGET_REG_MACH = 20,
1428     TARGET_REG_MACL = 21,
1429     TARGET_REG_SYSCALL = 22
1430 };
1431 
1432 static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
1433                                       const CPUSH4State *env)
1434 {
1435     int i;
1436 
1437     for (i = 0; i < 16; i++) {
1438         (*regs)[i] = tswapreg(env->gregs[i]);
1439     }
1440 
1441     (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
1442     (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
1443     (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
1444     (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
1445     (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
1446     (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
1447     (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
1448 }
1449 
1450 #define USE_ELF_CORE_DUMP
1451 #define ELF_EXEC_PAGESIZE        4096
1452 
1453 enum {
1454     SH_CPU_HAS_FPU            = 0x0001, /* Hardware FPU support */
1455     SH_CPU_HAS_P2_FLUSH_BUG   = 0x0002, /* Need to flush the cache in P2 area */
1456     SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
1457     SH_CPU_HAS_DSP            = 0x0008, /* SH-DSP: DSP support */
1458     SH_CPU_HAS_PERF_COUNTER   = 0x0010, /* Hardware performance counters */
1459     SH_CPU_HAS_PTEA           = 0x0020, /* PTEA register */
1460     SH_CPU_HAS_LLSC           = 0x0040, /* movli.l/movco.l */
1461     SH_CPU_HAS_L2_CACHE       = 0x0080, /* Secondary cache / URAM */
1462     SH_CPU_HAS_OP32           = 0x0100, /* 32-bit instruction support */
1463     SH_CPU_HAS_PTEAEX         = 0x0200, /* PTE ASID Extension support */
1464 };
1465 
1466 #define ELF_HWCAP get_elf_hwcap()
1467 
1468 static uint32_t get_elf_hwcap(void)
1469 {
1470     SuperHCPU *cpu = SUPERH_CPU(thread_cpu);
1471     uint32_t hwcap = 0;
1472 
1473     hwcap |= SH_CPU_HAS_FPU;
1474 
1475     if (cpu->env.features & SH_FEATURE_SH4A) {
1476         hwcap |= SH_CPU_HAS_LLSC;
1477     }
1478 
1479     return hwcap;
1480 }
1481 
1482 #endif
1483 
1484 #ifdef TARGET_CRIS
1485 
1486 #define ELF_START_MMAP 0x80000000
1487 
1488 #define ELF_CLASS ELFCLASS32
1489 #define ELF_ARCH  EM_CRIS
1490 
1491 static inline void init_thread(struct target_pt_regs *regs,
1492                                struct image_info *infop)
1493 {
1494     regs->erp = infop->entry;
1495 }
1496 
1497 #define ELF_EXEC_PAGESIZE        8192
1498 
1499 #endif
1500 
1501 #ifdef TARGET_M68K
1502 
1503 #define ELF_START_MMAP 0x80000000
1504 
1505 #define ELF_CLASS       ELFCLASS32
1506 #define ELF_ARCH        EM_68K
1507 
1508 /* ??? Does this need to do anything?
1509    #define ELF_PLAT_INIT(_r) */
1510 
1511 static inline void init_thread(struct target_pt_regs *regs,
1512                                struct image_info *infop)
1513 {
1514     regs->usp = infop->start_stack;
1515     regs->sr = 0;
1516     regs->pc = infop->entry;
1517 }
1518 
1519 /* See linux kernel: arch/m68k/include/asm/elf.h.  */
1520 #define ELF_NREG 20
1521 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1522 
1523 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
1524 {
1525     (*regs)[0] = tswapreg(env->dregs[1]);
1526     (*regs)[1] = tswapreg(env->dregs[2]);
1527     (*regs)[2] = tswapreg(env->dregs[3]);
1528     (*regs)[3] = tswapreg(env->dregs[4]);
1529     (*regs)[4] = tswapreg(env->dregs[5]);
1530     (*regs)[5] = tswapreg(env->dregs[6]);
1531     (*regs)[6] = tswapreg(env->dregs[7]);
1532     (*regs)[7] = tswapreg(env->aregs[0]);
1533     (*regs)[8] = tswapreg(env->aregs[1]);
1534     (*regs)[9] = tswapreg(env->aregs[2]);
1535     (*regs)[10] = tswapreg(env->aregs[3]);
1536     (*regs)[11] = tswapreg(env->aregs[4]);
1537     (*regs)[12] = tswapreg(env->aregs[5]);
1538     (*regs)[13] = tswapreg(env->aregs[6]);
1539     (*regs)[14] = tswapreg(env->dregs[0]);
1540     (*regs)[15] = tswapreg(env->aregs[7]);
1541     (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
1542     (*regs)[17] = tswapreg(env->sr);
1543     (*regs)[18] = tswapreg(env->pc);
1544     (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
1545 }
1546 
1547 #define USE_ELF_CORE_DUMP
1548 #define ELF_EXEC_PAGESIZE       8192
1549 
1550 #endif
1551 
1552 #ifdef TARGET_ALPHA
1553 
1554 #define ELF_START_MMAP (0x30000000000ULL)
1555 
1556 #define ELF_CLASS      ELFCLASS64
1557 #define ELF_ARCH       EM_ALPHA
1558 
1559 static inline void init_thread(struct target_pt_regs *regs,
1560                                struct image_info *infop)
1561 {
1562     regs->pc = infop->entry;
1563     regs->ps = 8;
1564     regs->usp = infop->start_stack;
1565 }
1566 
1567 #define ELF_EXEC_PAGESIZE        8192
1568 
1569 #endif /* TARGET_ALPHA */
1570 
1571 #ifdef TARGET_S390X
1572 
1573 #define ELF_START_MMAP (0x20000000000ULL)
1574 
1575 #define ELF_CLASS	ELFCLASS64
1576 #define ELF_DATA	ELFDATA2MSB
1577 #define ELF_ARCH	EM_S390
1578 
1579 #include "elf.h"
1580 
1581 #define ELF_HWCAP get_elf_hwcap()
1582 
1583 #define GET_FEATURE(_feat, _hwcap) \
1584     do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)
1585 
1586 static uint32_t get_elf_hwcap(void)
1587 {
1588     /*
1589      * Let's assume we always have esan3 and zarch.
1590      * 31-bit processes can use 64-bit registers (high gprs).
1591      */
1592     uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS;
1593 
1594     GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE);
1595     GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA);
1596     GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP);
1597     GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM);
1598     if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) &&
1599         s390_has_feat(S390_FEAT_ETF3_ENH)) {
1600         hwcap |= HWCAP_S390_ETF3EH;
1601     }
1602     GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS);
1603     GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT);
1604 
1605     return hwcap;
1606 }
1607 
1608 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
1609 {
1610     regs->psw.addr = infop->entry;
1611     regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
1612     regs->gprs[15] = infop->start_stack;
1613 }
1614 
1615 /* See linux kernel: arch/s390/include/uapi/asm/ptrace.h (s390_regs).  */
1616 #define ELF_NREG 27
1617 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1618 
1619 enum {
1620     TARGET_REG_PSWM = 0,
1621     TARGET_REG_PSWA = 1,
1622     TARGET_REG_GPRS = 2,
1623     TARGET_REG_ARS = 18,
1624     TARGET_REG_ORIG_R2 = 26,
1625 };
1626 
1627 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1628                                const CPUS390XState *env)
1629 {
1630     int i;
1631     uint32_t *aregs;
1632 
1633     (*regs)[TARGET_REG_PSWM] = tswapreg(env->psw.mask);
1634     (*regs)[TARGET_REG_PSWA] = tswapreg(env->psw.addr);
1635     for (i = 0; i < 16; i++) {
1636         (*regs)[TARGET_REG_GPRS + i] = tswapreg(env->regs[i]);
1637     }
1638     aregs = (uint32_t *)&((*regs)[TARGET_REG_ARS]);
1639     for (i = 0; i < 16; i++) {
1640         aregs[i] = tswap32(env->aregs[i]);
1641     }
1642     (*regs)[TARGET_REG_ORIG_R2] = 0;
1643 }
1644 
1645 #define USE_ELF_CORE_DUMP
1646 #define ELF_EXEC_PAGESIZE 4096
1647 
1648 #endif /* TARGET_S390X */
1649 
1650 #ifdef TARGET_RISCV
1651 
1652 #define ELF_START_MMAP 0x80000000
1653 #define ELF_ARCH  EM_RISCV
1654 
1655 #ifdef TARGET_RISCV32
1656 #define ELF_CLASS ELFCLASS32
1657 #else
1658 #define ELF_CLASS ELFCLASS64
1659 #endif
1660 
1661 #define ELF_HWCAP get_elf_hwcap()
1662 
1663 static uint32_t get_elf_hwcap(void)
1664 {
1665 #define MISA_BIT(EXT) (1 << (EXT - 'A'))
1666     RISCVCPU *cpu = RISCV_CPU(thread_cpu);
1667     uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A')
1668                     | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C');
1669 
1670     return cpu->env.misa_ext & mask;
1671 #undef MISA_BIT
1672 }
1673 
1674 static inline void init_thread(struct target_pt_regs *regs,
1675                                struct image_info *infop)
1676 {
1677     regs->sepc = infop->entry;
1678     regs->sp = infop->start_stack;
1679 }
1680 
1681 #define ELF_EXEC_PAGESIZE 4096
1682 
1683 #endif /* TARGET_RISCV */
1684 
1685 #ifdef TARGET_HPPA
1686 
1687 #define ELF_START_MMAP  0x80000000
1688 #define ELF_CLASS       ELFCLASS32
1689 #define ELF_ARCH        EM_PARISC
1690 #define ELF_PLATFORM    "PARISC"
1691 #define STACK_GROWS_DOWN 0
1692 #define STACK_ALIGNMENT  64
1693 
1694 static inline void init_thread(struct target_pt_regs *regs,
1695                                struct image_info *infop)
1696 {
1697     regs->iaoq[0] = infop->entry;
1698     regs->iaoq[1] = infop->entry + 4;
1699     regs->gr[23] = 0;
1700     regs->gr[24] = infop->argv;
1701     regs->gr[25] = infop->argc;
1702     /* The top-of-stack contains a linkage buffer.  */
1703     regs->gr[30] = infop->start_stack + 64;
1704     regs->gr[31] = infop->entry;
1705 }
1706 
1707 #define LO_COMMPAGE  0
1708 
1709 static bool init_guest_commpage(void)
1710 {
1711     void *want = g2h_untagged(LO_COMMPAGE);
1712     void *addr = mmap(want, qemu_host_page_size, PROT_NONE,
1713                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
1714 
1715     if (addr == MAP_FAILED) {
1716         perror("Allocating guest commpage");
1717         exit(EXIT_FAILURE);
1718     }
1719     if (addr != want) {
1720         return false;
1721     }
1722 
1723     /*
1724      * On Linux, page zero is normally marked execute only + gateway.
1725      * Normal read or write is supposed to fail (thus PROT_NONE above),
1726      * but specific offsets have kernel code mapped to raise permissions
1727      * and implement syscalls.  Here, simply mark the page executable.
1728      * Special case the entry points during translation (see do_page_zero).
1729      */
1730     page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
1731                    PAGE_EXEC | PAGE_VALID);
1732     return true;
1733 }
1734 
1735 #endif /* TARGET_HPPA */
1736 
1737 #ifdef TARGET_XTENSA
1738 
1739 #define ELF_START_MMAP 0x20000000
1740 
1741 #define ELF_CLASS       ELFCLASS32
1742 #define ELF_ARCH        EM_XTENSA
1743 
1744 static inline void init_thread(struct target_pt_regs *regs,
1745                                struct image_info *infop)
1746 {
1747     regs->windowbase = 0;
1748     regs->windowstart = 1;
1749     regs->areg[1] = infop->start_stack;
1750     regs->pc = infop->entry;
1751     if (info_is_fdpic(infop)) {
1752         regs->areg[4] = infop->loadmap_addr;
1753         regs->areg[5] = infop->interpreter_loadmap_addr;
1754         if (infop->interpreter_loadmap_addr) {
1755             regs->areg[6] = infop->interpreter_pt_dynamic_addr;
1756         } else {
1757             regs->areg[6] = infop->pt_dynamic_addr;
1758         }
1759     }
1760 }
1761 
1762 /* See linux kernel: arch/xtensa/include/asm/elf.h.  */
1763 #define ELF_NREG 128
1764 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1765 
1766 enum {
1767     TARGET_REG_PC,
1768     TARGET_REG_PS,
1769     TARGET_REG_LBEG,
1770     TARGET_REG_LEND,
1771     TARGET_REG_LCOUNT,
1772     TARGET_REG_SAR,
1773     TARGET_REG_WINDOWSTART,
1774     TARGET_REG_WINDOWBASE,
1775     TARGET_REG_THREADPTR,
1776     TARGET_REG_AR0 = 64,
1777 };
1778 
1779 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1780                                const CPUXtensaState *env)
1781 {
1782     unsigned i;
1783 
1784     (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
1785     (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM);
1786     (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]);
1787     (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]);
1788     (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]);
1789     (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]);
1790     (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]);
1791     (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]);
1792     (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]);
1793     xtensa_sync_phys_from_window((CPUXtensaState *)env);
1794     for (i = 0; i < env->config->nareg; ++i) {
1795         (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]);
1796     }
1797 }
1798 
1799 #define USE_ELF_CORE_DUMP
1800 #define ELF_EXEC_PAGESIZE       4096
1801 
1802 #endif /* TARGET_XTENSA */
1803 
1804 #ifdef TARGET_HEXAGON
1805 
1806 #define ELF_START_MMAP 0x20000000
1807 
1808 #define ELF_CLASS       ELFCLASS32
1809 #define ELF_ARCH        EM_HEXAGON
1810 
1811 static inline void init_thread(struct target_pt_regs *regs,
1812                                struct image_info *infop)
1813 {
1814     regs->sepc = infop->entry;
1815     regs->sp = infop->start_stack;
1816 }
1817 
1818 #endif /* TARGET_HEXAGON */
1819 
1820 #ifndef ELF_BASE_PLATFORM
1821 #define ELF_BASE_PLATFORM (NULL)
1822 #endif
1823 
1824 #ifndef ELF_PLATFORM
1825 #define ELF_PLATFORM (NULL)
1826 #endif
1827 
1828 #ifndef ELF_MACHINE
1829 #define ELF_MACHINE ELF_ARCH
1830 #endif
1831 
1832 #ifndef elf_check_arch
1833 #define elf_check_arch(x) ((x) == ELF_ARCH)
1834 #endif
1835 
1836 #ifndef elf_check_abi
1837 #define elf_check_abi(x) (1)
1838 #endif
1839 
1840 #ifndef ELF_HWCAP
1841 #define ELF_HWCAP 0
1842 #endif
1843 
1844 #ifndef STACK_GROWS_DOWN
1845 #define STACK_GROWS_DOWN 1
1846 #endif
1847 
1848 #ifndef STACK_ALIGNMENT
1849 #define STACK_ALIGNMENT 16
1850 #endif
1851 
1852 #ifdef TARGET_ABI32
1853 #undef ELF_CLASS
1854 #define ELF_CLASS ELFCLASS32
1855 #undef bswaptls
1856 #define bswaptls(ptr) bswap32s(ptr)
1857 #endif
1858 
1859 #ifndef EXSTACK_DEFAULT
1860 #define EXSTACK_DEFAULT false
1861 #endif
1862 
1863 #include "elf.h"
1864 
1865 /* We must delay the following stanzas until after "elf.h". */
1866 #if defined(TARGET_AARCH64)
1867 
1868 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz,
1869                                     const uint32_t *data,
1870                                     struct image_info *info,
1871                                     Error **errp)
1872 {
1873     if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
1874         if (pr_datasz != sizeof(uint32_t)) {
1875             error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND");
1876             return false;
1877         }
1878         /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */
1879         info->note_flags = *data;
1880     }
1881     return true;
1882 }
1883 #define ARCH_USE_GNU_PROPERTY 1
1884 
1885 #else
1886 
1887 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz,
1888                                     const uint32_t *data,
1889                                     struct image_info *info,
1890                                     Error **errp)
1891 {
1892     g_assert_not_reached();
1893 }
1894 #define ARCH_USE_GNU_PROPERTY 0
1895 
1896 #endif
1897 
1898 struct exec
1899 {
1900     unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
1901     unsigned int a_text;   /* length of text, in bytes */
1902     unsigned int a_data;   /* length of data, in bytes */
1903     unsigned int a_bss;    /* length of uninitialized data area, in bytes */
1904     unsigned int a_syms;   /* length of symbol table data in file, in bytes */
1905     unsigned int a_entry;  /* start address */
1906     unsigned int a_trsize; /* length of relocation info for text, in bytes */
1907     unsigned int a_drsize; /* length of relocation info for data, in bytes */
1908 };
1909 
1910 
1911 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
1912 #define OMAGIC 0407
1913 #define NMAGIC 0410
1914 #define ZMAGIC 0413
1915 #define QMAGIC 0314
1916 
1917 /* Necessary parameters */
1918 #define TARGET_ELF_EXEC_PAGESIZE \
1919         (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \
1920          TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE))
1921 #define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE)
1922 #define TARGET_ELF_PAGESTART(_v) ((_v) & \
1923                                  ~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
1924 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
1925 
1926 #define DLINFO_ITEMS 16
1927 
1928 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
1929 {
1930     memcpy(to, from, n);
1931 }
1932 
1933 #ifdef BSWAP_NEEDED
1934 static void bswap_ehdr(struct elfhdr *ehdr)
1935 {
1936     bswap16s(&ehdr->e_type);            /* Object file type */
1937     bswap16s(&ehdr->e_machine);         /* Architecture */
1938     bswap32s(&ehdr->e_version);         /* Object file version */
1939     bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
1940     bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
1941     bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
1942     bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
1943     bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
1944     bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
1945     bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
1946     bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
1947     bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
1948     bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
1949 }
1950 
1951 static void bswap_phdr(struct elf_phdr *phdr, int phnum)
1952 {
1953     int i;
1954     for (i = 0; i < phnum; ++i, ++phdr) {
1955         bswap32s(&phdr->p_type);        /* Segment type */
1956         bswap32s(&phdr->p_flags);       /* Segment flags */
1957         bswaptls(&phdr->p_offset);      /* Segment file offset */
1958         bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
1959         bswaptls(&phdr->p_paddr);       /* Segment physical address */
1960         bswaptls(&phdr->p_filesz);      /* Segment size in file */
1961         bswaptls(&phdr->p_memsz);       /* Segment size in memory */
1962         bswaptls(&phdr->p_align);       /* Segment alignment */
1963     }
1964 }
1965 
1966 static void bswap_shdr(struct elf_shdr *shdr, int shnum)
1967 {
1968     int i;
1969     for (i = 0; i < shnum; ++i, ++shdr) {
1970         bswap32s(&shdr->sh_name);
1971         bswap32s(&shdr->sh_type);
1972         bswaptls(&shdr->sh_flags);
1973         bswaptls(&shdr->sh_addr);
1974         bswaptls(&shdr->sh_offset);
1975         bswaptls(&shdr->sh_size);
1976         bswap32s(&shdr->sh_link);
1977         bswap32s(&shdr->sh_info);
1978         bswaptls(&shdr->sh_addralign);
1979         bswaptls(&shdr->sh_entsize);
1980     }
1981 }
1982 
1983 static void bswap_sym(struct elf_sym *sym)
1984 {
1985     bswap32s(&sym->st_name);
1986     bswaptls(&sym->st_value);
1987     bswaptls(&sym->st_size);
1988     bswap16s(&sym->st_shndx);
1989 }
1990 
1991 #ifdef TARGET_MIPS
1992 static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
1993 {
1994     bswap16s(&abiflags->version);
1995     bswap32s(&abiflags->ases);
1996     bswap32s(&abiflags->isa_ext);
1997     bswap32s(&abiflags->flags1);
1998     bswap32s(&abiflags->flags2);
1999 }
2000 #endif
2001 #else
2002 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
2003 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
2004 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
2005 static inline void bswap_sym(struct elf_sym *sym) { }
2006 #ifdef TARGET_MIPS
2007 static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
2008 #endif
2009 #endif
2010 
2011 #ifdef USE_ELF_CORE_DUMP
2012 static int elf_core_dump(int, const CPUArchState *);
2013 #endif /* USE_ELF_CORE_DUMP */
2014 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
2015 
2016 /* Verify the portions of EHDR within E_IDENT for the target.
2017    This can be performed before bswapping the entire header.  */
2018 static bool elf_check_ident(struct elfhdr *ehdr)
2019 {
2020     return (ehdr->e_ident[EI_MAG0] == ELFMAG0
2021             && ehdr->e_ident[EI_MAG1] == ELFMAG1
2022             && ehdr->e_ident[EI_MAG2] == ELFMAG2
2023             && ehdr->e_ident[EI_MAG3] == ELFMAG3
2024             && ehdr->e_ident[EI_CLASS] == ELF_CLASS
2025             && ehdr->e_ident[EI_DATA] == ELF_DATA
2026             && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
2027 }
2028 
2029 /* Verify the portions of EHDR outside of E_IDENT for the target.
2030    This has to wait until after bswapping the header.  */
2031 static bool elf_check_ehdr(struct elfhdr *ehdr)
2032 {
2033     return (elf_check_arch(ehdr->e_machine)
2034             && elf_check_abi(ehdr->e_flags)
2035             && ehdr->e_ehsize == sizeof(struct elfhdr)
2036             && ehdr->e_phentsize == sizeof(struct elf_phdr)
2037             && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
2038 }
2039 
2040 /*
2041  * 'copy_elf_strings()' copies argument/envelope strings from user
2042  * memory to free pages in kernel mem. These are in a format ready
2043  * to be put directly into the top of new user memory.
2044  *
2045  */
2046 static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
2047                                   abi_ulong p, abi_ulong stack_limit)
2048 {
2049     char *tmp;
2050     int len, i;
2051     abi_ulong top = p;
2052 
2053     if (!p) {
2054         return 0;       /* bullet-proofing */
2055     }
2056 
2057     if (STACK_GROWS_DOWN) {
2058         int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
2059         for (i = argc - 1; i >= 0; --i) {
2060             tmp = argv[i];
2061             if (!tmp) {
2062                 fprintf(stderr, "VFS: argc is wrong");
2063                 exit(-1);
2064             }
2065             len = strlen(tmp) + 1;
2066             tmp += len;
2067 
2068             if (len > (p - stack_limit)) {
2069                 return 0;
2070             }
2071             while (len) {
2072                 int bytes_to_copy = (len > offset) ? offset : len;
2073                 tmp -= bytes_to_copy;
2074                 p -= bytes_to_copy;
2075                 offset -= bytes_to_copy;
2076                 len -= bytes_to_copy;
2077 
2078                 memcpy_fromfs(scratch + offset, tmp, bytes_to_copy);
2079 
2080                 if (offset == 0) {
2081                     memcpy_to_target(p, scratch, top - p);
2082                     top = p;
2083                     offset = TARGET_PAGE_SIZE;
2084                 }
2085             }
2086         }
2087         if (p != top) {
2088             memcpy_to_target(p, scratch + offset, top - p);
2089         }
2090     } else {
2091         int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE);
2092         for (i = 0; i < argc; ++i) {
2093             tmp = argv[i];
2094             if (!tmp) {
2095                 fprintf(stderr, "VFS: argc is wrong");
2096                 exit(-1);
2097             }
2098             len = strlen(tmp) + 1;
2099             if (len > (stack_limit - p)) {
2100                 return 0;
2101             }
2102             while (len) {
2103                 int bytes_to_copy = (len > remaining) ? remaining : len;
2104 
2105                 memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy);
2106 
2107                 tmp += bytes_to_copy;
2108                 remaining -= bytes_to_copy;
2109                 p += bytes_to_copy;
2110                 len -= bytes_to_copy;
2111 
2112                 if (remaining == 0) {
2113                     memcpy_to_target(top, scratch, p - top);
2114                     top = p;
2115                     remaining = TARGET_PAGE_SIZE;
2116                 }
2117             }
2118         }
2119         if (p != top) {
2120             memcpy_to_target(top, scratch, p - top);
2121         }
2122     }
2123 
2124     return p;
2125 }
2126 
2127 /* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of
2128  * argument/environment space. Newer kernels (>2.6.33) allow more,
2129  * dependent on stack size, but guarantee at least 32 pages for
2130  * backwards compatibility.
2131  */
2132 #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE)
2133 
2134 static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
2135                                  struct image_info *info)
2136 {
2137     abi_ulong size, error, guard;
2138     int prot;
2139 
2140     size = guest_stack_size;
2141     if (size < STACK_LOWER_LIMIT) {
2142         size = STACK_LOWER_LIMIT;
2143     }
2144 
2145     if (STACK_GROWS_DOWN) {
2146         guard = TARGET_PAGE_SIZE;
2147         if (guard < qemu_real_host_page_size()) {
2148             guard = qemu_real_host_page_size();
2149         }
2150     } else {
2151         /* no guard page for hppa target where stack grows upwards. */
2152         guard = 0;
2153     }
2154 
2155     prot = PROT_READ | PROT_WRITE;
2156     if (info->exec_stack) {
2157         prot |= PROT_EXEC;
2158     }
2159     error = target_mmap(0, size + guard, prot,
2160                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2161     if (error == -1) {
2162         perror("mmap stack");
2163         exit(-1);
2164     }
2165 
2166     /* We reserve one extra page at the top of the stack as guard.  */
2167     if (STACK_GROWS_DOWN) {
2168         target_mprotect(error, guard, PROT_NONE);
2169         info->stack_limit = error + guard;
2170         return info->stack_limit + size - sizeof(void *);
2171     } else {
2172         info->stack_limit = error + size;
2173         return error;
2174     }
2175 }
2176 
2177 /* Map and zero the bss.  We need to explicitly zero any fractional pages
2178    after the data section (i.e. bss).  */
2179 static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
2180 {
2181     uintptr_t host_start, host_map_start, host_end;
2182 
2183     last_bss = TARGET_PAGE_ALIGN(last_bss);
2184 
2185     /* ??? There is confusion between qemu_real_host_page_size and
2186        qemu_host_page_size here and elsewhere in target_mmap, which
2187        may lead to the end of the data section mapping from the file
2188        not being mapped.  At least there was an explicit test and
2189        comment for that here, suggesting that "the file size must
2190        be known".  The comment probably pre-dates the introduction
2191        of the fstat system call in target_mmap which does in fact
2192        find out the size.  What isn't clear is if the workaround
2193        here is still actually needed.  For now, continue with it,
2194        but merge it with the "normal" mmap that would allocate the bss.  */
2195 
2196     host_start = (uintptr_t) g2h_untagged(elf_bss);
2197     host_end = (uintptr_t) g2h_untagged(last_bss);
2198     host_map_start = REAL_HOST_PAGE_ALIGN(host_start);
2199 
2200     if (host_map_start < host_end) {
2201         void *p = mmap((void *)host_map_start, host_end - host_map_start,
2202                        prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2203         if (p == MAP_FAILED) {
2204             perror("cannot mmap brk");
2205             exit(-1);
2206         }
2207     }
2208 
2209     /* Ensure that the bss page(s) are valid */
2210     if ((page_get_flags(last_bss-1) & prot) != prot) {
2211         page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
2212     }
2213 
2214     if (host_start < host_map_start) {
2215         memset((void *)host_start, 0, host_map_start - host_start);
2216     }
2217 }
2218 
2219 #if defined(TARGET_ARM)
2220 static int elf_is_fdpic(struct elfhdr *exec)
2221 {
2222     return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
2223 }
2224 #elif defined(TARGET_XTENSA)
2225 static int elf_is_fdpic(struct elfhdr *exec)
2226 {
2227     return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC;
2228 }
2229 #else
2230 /* Default implementation, always false.  */
2231 static int elf_is_fdpic(struct elfhdr *exec)
2232 {
2233     return 0;
2234 }
2235 #endif
2236 
2237 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
2238 {
2239     uint16_t n;
2240     struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
2241 
2242     /* elf32_fdpic_loadseg */
2243     n = info->nsegs;
2244     while (n--) {
2245         sp -= 12;
2246         put_user_u32(loadsegs[n].addr, sp+0);
2247         put_user_u32(loadsegs[n].p_vaddr, sp+4);
2248         put_user_u32(loadsegs[n].p_memsz, sp+8);
2249     }
2250 
2251     /* elf32_fdpic_loadmap */
2252     sp -= 4;
2253     put_user_u16(0, sp+0); /* version */
2254     put_user_u16(info->nsegs, sp+2); /* nsegs */
2255 
2256     info->personality = PER_LINUX_FDPIC;
2257     info->loadmap_addr = sp;
2258 
2259     return sp;
2260 }
2261 
2262 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
2263                                    struct elfhdr *exec,
2264                                    struct image_info *info,
2265                                    struct image_info *interp_info)
2266 {
2267     abi_ulong sp;
2268     abi_ulong u_argc, u_argv, u_envp, u_auxv;
2269     int size;
2270     int i;
2271     abi_ulong u_rand_bytes;
2272     uint8_t k_rand_bytes[16];
2273     abi_ulong u_platform, u_base_platform;
2274     const char *k_platform, *k_base_platform;
2275     const int n = sizeof(elf_addr_t);
2276 
2277     sp = p;
2278 
2279     /* Needs to be before we load the env/argc/... */
2280     if (elf_is_fdpic(exec)) {
2281         /* Need 4 byte alignment for these structs */
2282         sp &= ~3;
2283         sp = loader_build_fdpic_loadmap(info, sp);
2284         info->other_info = interp_info;
2285         if (interp_info) {
2286             interp_info->other_info = info;
2287             sp = loader_build_fdpic_loadmap(interp_info, sp);
2288             info->interpreter_loadmap_addr = interp_info->loadmap_addr;
2289             info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr;
2290         } else {
2291             info->interpreter_loadmap_addr = 0;
2292             info->interpreter_pt_dynamic_addr = 0;
2293         }
2294     }
2295 
2296     u_base_platform = 0;
2297     k_base_platform = ELF_BASE_PLATFORM;
2298     if (k_base_platform) {
2299         size_t len = strlen(k_base_platform) + 1;
2300         if (STACK_GROWS_DOWN) {
2301             sp -= (len + n - 1) & ~(n - 1);
2302             u_base_platform = sp;
2303             /* FIXME - check return value of memcpy_to_target() for failure */
2304             memcpy_to_target(sp, k_base_platform, len);
2305         } else {
2306             memcpy_to_target(sp, k_base_platform, len);
2307             u_base_platform = sp;
2308             sp += len + 1;
2309         }
2310     }
2311 
2312     u_platform = 0;
2313     k_platform = ELF_PLATFORM;
2314     if (k_platform) {
2315         size_t len = strlen(k_platform) + 1;
2316         if (STACK_GROWS_DOWN) {
2317             sp -= (len + n - 1) & ~(n - 1);
2318             u_platform = sp;
2319             /* FIXME - check return value of memcpy_to_target() for failure */
2320             memcpy_to_target(sp, k_platform, len);
2321         } else {
2322             memcpy_to_target(sp, k_platform, len);
2323             u_platform = sp;
2324             sp += len + 1;
2325         }
2326     }
2327 
2328     /* Provide 16 byte alignment for the PRNG, and basic alignment for
2329      * the argv and envp pointers.
2330      */
2331     if (STACK_GROWS_DOWN) {
2332         sp = QEMU_ALIGN_DOWN(sp, 16);
2333     } else {
2334         sp = QEMU_ALIGN_UP(sp, 16);
2335     }
2336 
2337     /*
2338      * Generate 16 random bytes for userspace PRNG seeding.
2339      */
2340     qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes));
2341     if (STACK_GROWS_DOWN) {
2342         sp -= 16;
2343         u_rand_bytes = sp;
2344         /* FIXME - check return value of memcpy_to_target() for failure */
2345         memcpy_to_target(sp, k_rand_bytes, 16);
2346     } else {
2347         memcpy_to_target(sp, k_rand_bytes, 16);
2348         u_rand_bytes = sp;
2349         sp += 16;
2350     }
2351 
2352     size = (DLINFO_ITEMS + 1) * 2;
2353     if (k_base_platform)
2354         size += 2;
2355     if (k_platform)
2356         size += 2;
2357 #ifdef DLINFO_ARCH_ITEMS
2358     size += DLINFO_ARCH_ITEMS * 2;
2359 #endif
2360 #ifdef ELF_HWCAP2
2361     size += 2;
2362 #endif
2363     info->auxv_len = size * n;
2364 
2365     size += envc + argc + 2;
2366     size += 1;  /* argc itself */
2367     size *= n;
2368 
2369     /* Allocate space and finalize stack alignment for entry now.  */
2370     if (STACK_GROWS_DOWN) {
2371         u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT);
2372         sp = u_argc;
2373     } else {
2374         u_argc = sp;
2375         sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT);
2376     }
2377 
2378     u_argv = u_argc + n;
2379     u_envp = u_argv + (argc + 1) * n;
2380     u_auxv = u_envp + (envc + 1) * n;
2381     info->saved_auxv = u_auxv;
2382     info->argc = argc;
2383     info->envc = envc;
2384     info->argv = u_argv;
2385     info->envp = u_envp;
2386 
2387     /* This is correct because Linux defines
2388      * elf_addr_t as Elf32_Off / Elf64_Off
2389      */
2390 #define NEW_AUX_ENT(id, val) do {               \
2391         put_user_ual(id, u_auxv);  u_auxv += n; \
2392         put_user_ual(val, u_auxv); u_auxv += n; \
2393     } while(0)
2394 
2395 #ifdef ARCH_DLINFO
2396     /*
2397      * ARCH_DLINFO must come first so platform specific code can enforce
2398      * special alignment requirements on the AUXV if necessary (eg. PPC).
2399      */
2400     ARCH_DLINFO;
2401 #endif
2402     /* There must be exactly DLINFO_ITEMS entries here, or the assert
2403      * on info->auxv_len will trigger.
2404      */
2405     NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
2406     NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
2407     NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
2408     if ((info->alignment & ~qemu_host_page_mask) != 0) {
2409         /* Target doesn't support host page size alignment */
2410         NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
2411     } else {
2412         NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE,
2413                                                qemu_host_page_size)));
2414     }
2415     NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
2416     NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
2417     NEW_AUX_ENT(AT_ENTRY, info->entry);
2418     NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
2419     NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
2420     NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
2421     NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
2422     NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
2423     NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
2424     NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
2425     NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE));
2426     NEW_AUX_ENT(AT_EXECFN, info->file_string);
2427 
2428 #ifdef ELF_HWCAP2
2429     NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
2430 #endif
2431 
2432     if (u_base_platform) {
2433         NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform);
2434     }
2435     if (u_platform) {
2436         NEW_AUX_ENT(AT_PLATFORM, u_platform);
2437     }
2438     NEW_AUX_ENT (AT_NULL, 0);
2439 #undef NEW_AUX_ENT
2440 
2441     /* Check that our initial calculation of the auxv length matches how much
2442      * we actually put into it.
2443      */
2444     assert(info->auxv_len == u_auxv - info->saved_auxv);
2445 
2446     put_user_ual(argc, u_argc);
2447 
2448     p = info->arg_strings;
2449     for (i = 0; i < argc; ++i) {
2450         put_user_ual(p, u_argv);
2451         u_argv += n;
2452         p += target_strlen(p) + 1;
2453     }
2454     put_user_ual(0, u_argv);
2455 
2456     p = info->env_strings;
2457     for (i = 0; i < envc; ++i) {
2458         put_user_ual(p, u_envp);
2459         u_envp += n;
2460         p += target_strlen(p) + 1;
2461     }
2462     put_user_ual(0, u_envp);
2463 
2464     return sp;
2465 }
2466 
2467 #if defined(HI_COMMPAGE)
2468 #define LO_COMMPAGE -1
2469 #elif defined(LO_COMMPAGE)
2470 #define HI_COMMPAGE 0
2471 #else
2472 #define HI_COMMPAGE 0
2473 #define LO_COMMPAGE -1
2474 #ifndef INIT_GUEST_COMMPAGE
2475 #define init_guest_commpage() true
2476 #endif
2477 #endif
2478 
2479 static void pgb_fail_in_use(const char *image_name)
2480 {
2481     error_report("%s: requires virtual address space that is in use "
2482                  "(omit the -B option or choose a different value)",
2483                  image_name);
2484     exit(EXIT_FAILURE);
2485 }
2486 
2487 static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
2488                                 abi_ulong guest_hiaddr, long align)
2489 {
2490     const int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
2491     void *addr, *test;
2492 
2493     if (!QEMU_IS_ALIGNED(guest_base, align)) {
2494         fprintf(stderr, "Requested guest base %p does not satisfy "
2495                 "host minimum alignment (0x%lx)\n",
2496                 (void *)guest_base, align);
2497         exit(EXIT_FAILURE);
2498     }
2499 
2500     /* Sanity check the guest binary. */
2501     if (reserved_va) {
2502         if (guest_hiaddr > reserved_va) {
2503             error_report("%s: requires more than reserved virtual "
2504                          "address space (0x%" PRIx64 " > 0x%lx)",
2505                          image_name, (uint64_t)guest_hiaddr, reserved_va);
2506             exit(EXIT_FAILURE);
2507         }
2508     } else {
2509 #if HOST_LONG_BITS < TARGET_ABI_BITS
2510         if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) {
2511             error_report("%s: requires more virtual address space "
2512                          "than the host can provide (0x%" PRIx64 ")",
2513                          image_name, (uint64_t)guest_hiaddr - guest_base);
2514             exit(EXIT_FAILURE);
2515         }
2516 #endif
2517     }
2518 
2519     /*
2520      * Expand the allocation to the entire reserved_va.
2521      * Exclude the mmap_min_addr hole.
2522      */
2523     if (reserved_va) {
2524         guest_loaddr = (guest_base >= mmap_min_addr ? 0
2525                         : mmap_min_addr - guest_base);
2526         guest_hiaddr = reserved_va;
2527     }
2528 
2529     /* Reserve the address space for the binary, or reserved_va. */
2530     test = g2h_untagged(guest_loaddr);
2531     addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
2532     if (test != addr) {
2533         pgb_fail_in_use(image_name);
2534     }
2535     qemu_log_mask(CPU_LOG_PAGE,
2536                   "%s: base @ %p for " TARGET_ABI_FMT_ld " bytes\n",
2537                   __func__, addr, guest_hiaddr - guest_loaddr);
2538 }
2539 
2540 /**
2541  * pgd_find_hole_fallback: potential mmap address
2542  * @guest_size: size of available space
2543  * @brk: location of break
2544  * @align: memory alignment
2545  *
2546  * This is a fallback method for finding a hole in the host address
2547  * space if we don't have the benefit of being able to access
2548  * /proc/self/map. It can potentially take a very long time as we can
2549  * only dumbly iterate up the host address space seeing if the
2550  * allocation would work.
2551  */
2552 static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk,
2553                                         long align, uintptr_t offset)
2554 {
2555     uintptr_t base;
2556 
2557     /* Start (aligned) at the bottom and work our way up */
2558     base = ROUND_UP(mmap_min_addr, align);
2559 
2560     while (true) {
2561         uintptr_t align_start, end;
2562         align_start = ROUND_UP(base, align);
2563         end = align_start + guest_size + offset;
2564 
2565         /* if brk is anywhere in the range give ourselves some room to grow. */
2566         if (align_start <= brk && brk < end) {
2567             base = brk + (16 * MiB);
2568             continue;
2569         } else if (align_start + guest_size < align_start) {
2570             /* we have run out of space */
2571             return -1;
2572         } else {
2573             int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE |
2574                 MAP_FIXED_NOREPLACE;
2575             void * mmap_start = mmap((void *) align_start, guest_size,
2576                                      PROT_NONE, flags, -1, 0);
2577             if (mmap_start != MAP_FAILED) {
2578                 munmap(mmap_start, guest_size);
2579                 if (mmap_start == (void *) align_start) {
2580                     qemu_log_mask(CPU_LOG_PAGE,
2581                                   "%s: base @ %p for %" PRIdPTR" bytes\n",
2582                                   __func__, mmap_start + offset, guest_size);
2583                     return (uintptr_t) mmap_start + offset;
2584                 }
2585             }
2586             base += qemu_host_page_size;
2587         }
2588     }
2589 }
2590 
2591 /* Return value for guest_base, or -1 if no hole found. */
2592 static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size,
2593                                long align, uintptr_t offset)
2594 {
2595     GSList *maps, *iter;
2596     uintptr_t this_start, this_end, next_start, brk;
2597     intptr_t ret = -1;
2598 
2599     assert(QEMU_IS_ALIGNED(guest_loaddr, align));
2600 
2601     maps = read_self_maps();
2602 
2603     /* Read brk after we've read the maps, which will malloc. */
2604     brk = (uintptr_t)sbrk(0);
2605 
2606     if (!maps) {
2607         return pgd_find_hole_fallback(guest_size, brk, align, offset);
2608     }
2609 
2610     /* The first hole is before the first map entry. */
2611     this_start = mmap_min_addr;
2612 
2613     for (iter = maps; iter;
2614          this_start = next_start, iter = g_slist_next(iter)) {
2615         uintptr_t align_start, hole_size;
2616 
2617         this_end = ((MapInfo *)iter->data)->start;
2618         next_start = ((MapInfo *)iter->data)->end;
2619         align_start = ROUND_UP(this_start + offset, align);
2620 
2621         /* Skip holes that are too small. */
2622         if (align_start >= this_end) {
2623             continue;
2624         }
2625         hole_size = this_end - align_start;
2626         if (hole_size < guest_size) {
2627             continue;
2628         }
2629 
2630         /* If this hole contains brk, give ourselves some room to grow. */
2631         if (this_start <= brk && brk < this_end) {
2632             hole_size -= guest_size;
2633             if (sizeof(uintptr_t) == 8 && hole_size >= 1 * GiB) {
2634                 align_start += 1 * GiB;
2635             } else if (hole_size >= 16 * MiB) {
2636                 align_start += 16 * MiB;
2637             } else {
2638                 align_start = (this_end - guest_size) & -align;
2639                 if (align_start < this_start) {
2640                     continue;
2641                 }
2642             }
2643         }
2644 
2645         /* Record the lowest successful match. */
2646         if (ret < 0) {
2647             ret = align_start;
2648         }
2649         /* If this hole contains the identity map, select it. */
2650         if (align_start <= guest_loaddr &&
2651             guest_loaddr + guest_size <= this_end) {
2652             ret = 0;
2653         }
2654         /* If this hole ends above the identity map, stop looking. */
2655         if (this_end >= guest_loaddr) {
2656             break;
2657         }
2658     }
2659     free_self_maps(maps);
2660 
2661     if (ret != -1) {
2662         qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %" PRIxPTR
2663                       " for %" PRIuPTR " bytes\n",
2664                       __func__, ret, guest_size);
2665     }
2666 
2667     return ret;
2668 }
2669 
2670 static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
2671                        abi_ulong orig_hiaddr, long align)
2672 {
2673     uintptr_t loaddr = orig_loaddr;
2674     uintptr_t hiaddr = orig_hiaddr;
2675     uintptr_t offset = 0;
2676     uintptr_t addr;
2677 
2678     if (hiaddr != orig_hiaddr) {
2679         error_report("%s: requires virtual address space that the "
2680                      "host cannot provide (0x%" PRIx64 ")",
2681                      image_name, (uint64_t)orig_hiaddr);
2682         exit(EXIT_FAILURE);
2683     }
2684 
2685     loaddr &= -align;
2686     if (HI_COMMPAGE) {
2687         /*
2688          * Extend the allocation to include the commpage.
2689          * For a 64-bit host, this is just 4GiB; for a 32-bit host we
2690          * need to ensure there is space bellow the guest_base so we
2691          * can map the commpage in the place needed when the address
2692          * arithmetic wraps around.
2693          */
2694         if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
2695             hiaddr = (uintptr_t) 4 << 30;
2696         } else {
2697             offset = -(HI_COMMPAGE & -align);
2698         }
2699     } else if (LO_COMMPAGE != -1) {
2700         loaddr = MIN(loaddr, LO_COMMPAGE & -align);
2701     }
2702 
2703     addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
2704     if (addr == -1) {
2705         /*
2706          * If HI_COMMPAGE, there *might* be a non-consecutive allocation
2707          * that can satisfy both.  But as the normal arm32 link base address
2708          * is ~32k, and we extend down to include the commpage, making the
2709          * overhead only ~96k, this is unlikely.
2710          */
2711         error_report("%s: Unable to allocate %#zx bytes of "
2712                      "virtual address space", image_name,
2713                      (size_t)(hiaddr - loaddr));
2714         exit(EXIT_FAILURE);
2715     }
2716 
2717     guest_base = addr;
2718 
2719     qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %"PRIxPTR" for %" PRIuPTR" bytes\n",
2720                   __func__, addr, hiaddr - loaddr);
2721 }
2722 
2723 static void pgb_dynamic(const char *image_name, long align)
2724 {
2725     /*
2726      * The executable is dynamic and does not require a fixed address.
2727      * All we need is a commpage that satisfies align.
2728      * If we do not need a commpage, leave guest_base == 0.
2729      */
2730     if (HI_COMMPAGE) {
2731         uintptr_t addr, commpage;
2732 
2733         /* 64-bit hosts should have used reserved_va. */
2734         assert(sizeof(uintptr_t) == 4);
2735 
2736         /*
2737          * By putting the commpage at the first hole, that puts guest_base
2738          * just above that, and maximises the positive guest addresses.
2739          */
2740         commpage = HI_COMMPAGE & -align;
2741         addr = pgb_find_hole(commpage, -commpage, align, 0);
2742         assert(addr != -1);
2743         guest_base = addr;
2744     }
2745 }
2746 
2747 static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
2748                             abi_ulong guest_hiaddr, long align)
2749 {
2750     int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
2751     void *addr, *test;
2752 
2753     if (guest_hiaddr > reserved_va) {
2754         error_report("%s: requires more than reserved virtual "
2755                      "address space (0x%" PRIx64 " > 0x%lx)",
2756                      image_name, (uint64_t)guest_hiaddr, reserved_va);
2757         exit(EXIT_FAILURE);
2758     }
2759 
2760     /* Widen the "image" to the entire reserved address space. */
2761     pgb_static(image_name, 0, reserved_va, align);
2762 
2763     /* osdep.h defines this as 0 if it's missing */
2764     flags |= MAP_FIXED_NOREPLACE;
2765 
2766     /* Reserve the memory on the host. */
2767     assert(guest_base != 0);
2768     test = g2h_untagged(0);
2769     addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
2770     if (addr == MAP_FAILED || addr != test) {
2771         error_report("Unable to reserve 0x%lx bytes of virtual address "
2772                      "space at %p (%s) for use as guest address space (check your "
2773                      "virtual memory ulimit setting, min_mmap_addr or reserve less "
2774                      "using -R option)", reserved_va, test, strerror(errno));
2775         exit(EXIT_FAILURE);
2776     }
2777 
2778     qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
2779                   __func__, addr, reserved_va);
2780 }
2781 
2782 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
2783                       abi_ulong guest_hiaddr)
2784 {
2785     /* In order to use host shmat, we must be able to honor SHMLBA.  */
2786     uintptr_t align = MAX(SHMLBA, qemu_host_page_size);
2787 
2788     if (have_guest_base) {
2789         pgb_have_guest_base(image_name, guest_loaddr, guest_hiaddr, align);
2790     } else if (reserved_va) {
2791         pgb_reserved_va(image_name, guest_loaddr, guest_hiaddr, align);
2792     } else if (guest_loaddr) {
2793         pgb_static(image_name, guest_loaddr, guest_hiaddr, align);
2794     } else {
2795         pgb_dynamic(image_name, align);
2796     }
2797 
2798     /* Reserve and initialize the commpage. */
2799     if (!init_guest_commpage()) {
2800         /*
2801          * With have_guest_base, the user has selected the address and
2802          * we are trying to work with that.  Otherwise, we have selected
2803          * free space and init_guest_commpage must succeeded.
2804          */
2805         assert(have_guest_base);
2806         pgb_fail_in_use(image_name);
2807     }
2808 
2809     assert(QEMU_IS_ALIGNED(guest_base, align));
2810     qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space "
2811                   "@ 0x%" PRIx64 "\n", (uint64_t)guest_base);
2812 }
2813 
2814 enum {
2815     /* The string "GNU\0" as a magic number. */
2816     GNU0_MAGIC = const_le32('G' | 'N' << 8 | 'U' << 16),
2817     NOTE_DATA_SZ = 1 * KiB,
2818     NOTE_NAME_SZ = 4,
2819     ELF_GNU_PROPERTY_ALIGN = ELF_CLASS == ELFCLASS32 ? 4 : 8,
2820 };
2821 
2822 /*
2823  * Process a single gnu_property entry.
2824  * Return false for error.
2825  */
2826 static bool parse_elf_property(const uint32_t *data, int *off, int datasz,
2827                                struct image_info *info, bool have_prev_type,
2828                                uint32_t *prev_type, Error **errp)
2829 {
2830     uint32_t pr_type, pr_datasz, step;
2831 
2832     if (*off > datasz || !QEMU_IS_ALIGNED(*off, ELF_GNU_PROPERTY_ALIGN)) {
2833         goto error_data;
2834     }
2835     datasz -= *off;
2836     data += *off / sizeof(uint32_t);
2837 
2838     if (datasz < 2 * sizeof(uint32_t)) {
2839         goto error_data;
2840     }
2841     pr_type = data[0];
2842     pr_datasz = data[1];
2843     data += 2;
2844     datasz -= 2 * sizeof(uint32_t);
2845     step = ROUND_UP(pr_datasz, ELF_GNU_PROPERTY_ALIGN);
2846     if (step > datasz) {
2847         goto error_data;
2848     }
2849 
2850     /* Properties are supposed to be unique and sorted on pr_type. */
2851     if (have_prev_type && pr_type <= *prev_type) {
2852         if (pr_type == *prev_type) {
2853             error_setg(errp, "Duplicate property in PT_GNU_PROPERTY");
2854         } else {
2855             error_setg(errp, "Unsorted property in PT_GNU_PROPERTY");
2856         }
2857         return false;
2858     }
2859     *prev_type = pr_type;
2860 
2861     if (!arch_parse_elf_property(pr_type, pr_datasz, data, info, errp)) {
2862         return false;
2863     }
2864 
2865     *off += 2 * sizeof(uint32_t) + step;
2866     return true;
2867 
2868  error_data:
2869     error_setg(errp, "Ill-formed property in PT_GNU_PROPERTY");
2870     return false;
2871 }
2872 
2873 /* Process NT_GNU_PROPERTY_TYPE_0. */
2874 static bool parse_elf_properties(int image_fd,
2875                                  struct image_info *info,
2876                                  const struct elf_phdr *phdr,
2877                                  char bprm_buf[BPRM_BUF_SIZE],
2878                                  Error **errp)
2879 {
2880     union {
2881         struct elf_note nhdr;
2882         uint32_t data[NOTE_DATA_SZ / sizeof(uint32_t)];
2883     } note;
2884 
2885     int n, off, datasz;
2886     bool have_prev_type;
2887     uint32_t prev_type;
2888 
2889     /* Unless the arch requires properties, ignore them. */
2890     if (!ARCH_USE_GNU_PROPERTY) {
2891         return true;
2892     }
2893 
2894     /* If the properties are crazy large, that's too bad. */
2895     n = phdr->p_filesz;
2896     if (n > sizeof(note)) {
2897         error_setg(errp, "PT_GNU_PROPERTY too large");
2898         return false;
2899     }
2900     if (n < sizeof(note.nhdr)) {
2901         error_setg(errp, "PT_GNU_PROPERTY too small");
2902         return false;
2903     }
2904 
2905     if (phdr->p_offset + n <= BPRM_BUF_SIZE) {
2906         memcpy(&note, bprm_buf + phdr->p_offset, n);
2907     } else {
2908         ssize_t len = pread(image_fd, &note, n, phdr->p_offset);
2909         if (len != n) {
2910             error_setg_errno(errp, errno, "Error reading file header");
2911             return false;
2912         }
2913     }
2914 
2915     /*
2916      * The contents of a valid PT_GNU_PROPERTY is a sequence
2917      * of uint32_t -- swap them all now.
2918      */
2919 #ifdef BSWAP_NEEDED
2920     for (int i = 0; i < n / 4; i++) {
2921         bswap32s(note.data + i);
2922     }
2923 #endif
2924 
2925     /*
2926      * Note that nhdr is 3 words, and that the "name" described by namesz
2927      * immediately follows nhdr and is thus at the 4th word.  Further, all
2928      * of the inputs to the kernel's round_up are multiples of 4.
2929      */
2930     if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
2931         note.nhdr.n_namesz != NOTE_NAME_SZ ||
2932         note.data[3] != GNU0_MAGIC) {
2933         error_setg(errp, "Invalid note in PT_GNU_PROPERTY");
2934         return false;
2935     }
2936     off = sizeof(note.nhdr) + NOTE_NAME_SZ;
2937 
2938     datasz = note.nhdr.n_descsz + off;
2939     if (datasz > n) {
2940         error_setg(errp, "Invalid note size in PT_GNU_PROPERTY");
2941         return false;
2942     }
2943 
2944     have_prev_type = false;
2945     prev_type = 0;
2946     while (1) {
2947         if (off == datasz) {
2948             return true;  /* end, exit ok */
2949         }
2950         if (!parse_elf_property(note.data, &off, datasz, info,
2951                                 have_prev_type, &prev_type, errp)) {
2952             return false;
2953         }
2954         have_prev_type = true;
2955     }
2956 }
2957 
2958 /* Load an ELF image into the address space.
2959 
2960    IMAGE_NAME is the filename of the image, to use in error messages.
2961    IMAGE_FD is the open file descriptor for the image.
2962 
2963    BPRM_BUF is a copy of the beginning of the file; this of course
2964    contains the elf file header at offset 0.  It is assumed that this
2965    buffer is sufficiently aligned to present no problems to the host
2966    in accessing data at aligned offsets within the buffer.
2967 
2968    On return: INFO values will be filled in, as necessary or available.  */
2969 
2970 static void load_elf_image(const char *image_name, int image_fd,
2971                            struct image_info *info, char **pinterp_name,
2972                            char bprm_buf[BPRM_BUF_SIZE])
2973 {
2974     struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
2975     struct elf_phdr *phdr;
2976     abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
2977     int i, retval, prot_exec;
2978     Error *err = NULL;
2979 
2980     /* First of all, some simple consistency checks */
2981     if (!elf_check_ident(ehdr)) {
2982         error_setg(&err, "Invalid ELF image for this architecture");
2983         goto exit_errmsg;
2984     }
2985     bswap_ehdr(ehdr);
2986     if (!elf_check_ehdr(ehdr)) {
2987         error_setg(&err, "Invalid ELF image for this architecture");
2988         goto exit_errmsg;
2989     }
2990 
2991     i = ehdr->e_phnum * sizeof(struct elf_phdr);
2992     if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
2993         phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
2994     } else {
2995         phdr = (struct elf_phdr *) alloca(i);
2996         retval = pread(image_fd, phdr, i, ehdr->e_phoff);
2997         if (retval != i) {
2998             goto exit_read;
2999         }
3000     }
3001     bswap_phdr(phdr, ehdr->e_phnum);
3002 
3003     info->nsegs = 0;
3004     info->pt_dynamic_addr = 0;
3005 
3006     mmap_lock();
3007 
3008     /*
3009      * Find the maximum size of the image and allocate an appropriate
3010      * amount of memory to handle that.  Locate the interpreter, if any.
3011      */
3012     loaddr = -1, hiaddr = 0;
3013     info->alignment = 0;
3014     info->exec_stack = EXSTACK_DEFAULT;
3015     for (i = 0; i < ehdr->e_phnum; ++i) {
3016         struct elf_phdr *eppnt = phdr + i;
3017         if (eppnt->p_type == PT_LOAD) {
3018             abi_ulong a = eppnt->p_vaddr - eppnt->p_offset;
3019             if (a < loaddr) {
3020                 loaddr = a;
3021             }
3022             a = eppnt->p_vaddr + eppnt->p_memsz;
3023             if (a > hiaddr) {
3024                 hiaddr = a;
3025             }
3026             ++info->nsegs;
3027             info->alignment |= eppnt->p_align;
3028         } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
3029             g_autofree char *interp_name = NULL;
3030 
3031             if (*pinterp_name) {
3032                 error_setg(&err, "Multiple PT_INTERP entries");
3033                 goto exit_errmsg;
3034             }
3035 
3036             interp_name = g_malloc(eppnt->p_filesz);
3037 
3038             if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
3039                 memcpy(interp_name, bprm_buf + eppnt->p_offset,
3040                        eppnt->p_filesz);
3041             } else {
3042                 retval = pread(image_fd, interp_name, eppnt->p_filesz,
3043                                eppnt->p_offset);
3044                 if (retval != eppnt->p_filesz) {
3045                     goto exit_read;
3046                 }
3047             }
3048             if (interp_name[eppnt->p_filesz - 1] != 0) {
3049                 error_setg(&err, "Invalid PT_INTERP entry");
3050                 goto exit_errmsg;
3051             }
3052             *pinterp_name = g_steal_pointer(&interp_name);
3053         } else if (eppnt->p_type == PT_GNU_PROPERTY) {
3054             if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) {
3055                 goto exit_errmsg;
3056             }
3057         } else if (eppnt->p_type == PT_GNU_STACK) {
3058             info->exec_stack = eppnt->p_flags & PF_X;
3059         }
3060     }
3061 
3062     if (pinterp_name != NULL) {
3063         /*
3064          * This is the main executable.
3065          *
3066          * Reserve extra space for brk.
3067          * We hold on to this space while placing the interpreter
3068          * and the stack, lest they be placed immediately after
3069          * the data segment and block allocation from the brk.
3070          *
3071          * 16MB is chosen as "large enough" without being so large as
3072          * to allow the result to not fit with a 32-bit guest on a
3073          * 32-bit host. However some 64 bit guests (e.g. s390x)
3074          * attempt to place their heap further ahead and currently
3075          * nothing stops them smashing into QEMUs address space.
3076          */
3077 #if TARGET_LONG_BITS == 64
3078         info->reserve_brk = 32 * MiB;
3079 #else
3080         info->reserve_brk = 16 * MiB;
3081 #endif
3082         hiaddr += info->reserve_brk;
3083 
3084         if (ehdr->e_type == ET_EXEC) {
3085             /*
3086              * Make sure that the low address does not conflict with
3087              * MMAP_MIN_ADDR or the QEMU application itself.
3088              */
3089             probe_guest_base(image_name, loaddr, hiaddr);
3090         } else {
3091             /*
3092              * The binary is dynamic, but we still need to
3093              * select guest_base.  In this case we pass a size.
3094              */
3095             probe_guest_base(image_name, 0, hiaddr - loaddr);
3096         }
3097     }
3098 
3099     /*
3100      * Reserve address space for all of this.
3101      *
3102      * In the case of ET_EXEC, we supply MAP_FIXED so that we get
3103      * exactly the address range that is required.
3104      *
3105      * Otherwise this is ET_DYN, and we are searching for a location
3106      * that can hold the memory space required.  If the image is
3107      * pre-linked, LOADDR will be non-zero, and the kernel should
3108      * honor that address if it happens to be free.
3109      *
3110      * In both cases, we will overwrite pages in this range with mappings
3111      * from the executable.
3112      */
3113     load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
3114                             MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
3115                             (ehdr->e_type == ET_EXEC ? MAP_FIXED : 0),
3116                             -1, 0);
3117     if (load_addr == -1) {
3118         goto exit_mmap;
3119     }
3120     load_bias = load_addr - loaddr;
3121 
3122     if (elf_is_fdpic(ehdr)) {
3123         struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
3124             g_malloc(sizeof(*loadsegs) * info->nsegs);
3125 
3126         for (i = 0; i < ehdr->e_phnum; ++i) {
3127             switch (phdr[i].p_type) {
3128             case PT_DYNAMIC:
3129                 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
3130                 break;
3131             case PT_LOAD:
3132                 loadsegs->addr = phdr[i].p_vaddr + load_bias;
3133                 loadsegs->p_vaddr = phdr[i].p_vaddr;
3134                 loadsegs->p_memsz = phdr[i].p_memsz;
3135                 ++loadsegs;
3136                 break;
3137             }
3138         }
3139     }
3140 
3141     info->load_bias = load_bias;
3142     info->code_offset = load_bias;
3143     info->data_offset = load_bias;
3144     info->load_addr = load_addr;
3145     info->entry = ehdr->e_entry + load_bias;
3146     info->start_code = -1;
3147     info->end_code = 0;
3148     info->start_data = -1;
3149     info->end_data = 0;
3150     info->brk = 0;
3151     info->elf_flags = ehdr->e_flags;
3152 
3153     prot_exec = PROT_EXEC;
3154 #ifdef TARGET_AARCH64
3155     /*
3156      * If the BTI feature is present, this indicates that the executable
3157      * pages of the startup binary should be mapped with PROT_BTI, so that
3158      * branch targets are enforced.
3159      *
3160      * The startup binary is either the interpreter or the static executable.
3161      * The interpreter is responsible for all pages of a dynamic executable.
3162      *
3163      * Elf notes are backward compatible to older cpus.
3164      * Do not enable BTI unless it is supported.
3165      */
3166     if ((info->note_flags & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
3167         && (pinterp_name == NULL || *pinterp_name == 0)
3168         && cpu_isar_feature(aa64_bti, ARM_CPU(thread_cpu))) {
3169         prot_exec |= TARGET_PROT_BTI;
3170     }
3171 #endif
3172 
3173     for (i = 0; i < ehdr->e_phnum; i++) {
3174         struct elf_phdr *eppnt = phdr + i;
3175         if (eppnt->p_type == PT_LOAD) {
3176             abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len;
3177             int elf_prot = 0;
3178 
3179             if (eppnt->p_flags & PF_R) {
3180                 elf_prot |= PROT_READ;
3181             }
3182             if (eppnt->p_flags & PF_W) {
3183                 elf_prot |= PROT_WRITE;
3184             }
3185             if (eppnt->p_flags & PF_X) {
3186                 elf_prot |= prot_exec;
3187             }
3188 
3189             vaddr = load_bias + eppnt->p_vaddr;
3190             vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
3191             vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
3192 
3193             vaddr_ef = vaddr + eppnt->p_filesz;
3194             vaddr_em = vaddr + eppnt->p_memsz;
3195 
3196             /*
3197              * Some segments may be completely empty, with a non-zero p_memsz
3198              * but no backing file segment.
3199              */
3200             if (eppnt->p_filesz != 0) {
3201                 vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
3202                 error = target_mmap(vaddr_ps, vaddr_len, elf_prot,
3203                                     MAP_PRIVATE | MAP_FIXED,
3204                                     image_fd, eppnt->p_offset - vaddr_po);
3205 
3206                 if (error == -1) {
3207                     goto exit_mmap;
3208                 }
3209 
3210                 /*
3211                  * If the load segment requests extra zeros (e.g. bss), map it.
3212                  */
3213                 if (eppnt->p_filesz < eppnt->p_memsz) {
3214                     zero_bss(vaddr_ef, vaddr_em, elf_prot);
3215                 }
3216             } else if (eppnt->p_memsz != 0) {
3217                 vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_memsz + vaddr_po);
3218                 error = target_mmap(vaddr_ps, vaddr_len, elf_prot,
3219                                     MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
3220                                     -1, 0);
3221 
3222                 if (error == -1) {
3223                     goto exit_mmap;
3224                 }
3225             }
3226 
3227             /* Find the full program boundaries.  */
3228             if (elf_prot & PROT_EXEC) {
3229                 if (vaddr < info->start_code) {
3230                     info->start_code = vaddr;
3231                 }
3232                 if (vaddr_ef > info->end_code) {
3233                     info->end_code = vaddr_ef;
3234                 }
3235             }
3236             if (elf_prot & PROT_WRITE) {
3237                 if (vaddr < info->start_data) {
3238                     info->start_data = vaddr;
3239                 }
3240                 if (vaddr_ef > info->end_data) {
3241                     info->end_data = vaddr_ef;
3242                 }
3243             }
3244             if (vaddr_em > info->brk) {
3245                 info->brk = vaddr_em;
3246             }
3247 #ifdef TARGET_MIPS
3248         } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) {
3249             Mips_elf_abiflags_v0 abiflags;
3250             if (eppnt->p_filesz < sizeof(Mips_elf_abiflags_v0)) {
3251                 error_setg(&err, "Invalid PT_MIPS_ABIFLAGS entry");
3252                 goto exit_errmsg;
3253             }
3254             if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
3255                 memcpy(&abiflags, bprm_buf + eppnt->p_offset,
3256                        sizeof(Mips_elf_abiflags_v0));
3257             } else {
3258                 retval = pread(image_fd, &abiflags, sizeof(Mips_elf_abiflags_v0),
3259                                eppnt->p_offset);
3260                 if (retval != sizeof(Mips_elf_abiflags_v0)) {
3261                     goto exit_read;
3262                 }
3263             }
3264             bswap_mips_abiflags(&abiflags);
3265             info->fp_abi = abiflags.fp_abi;
3266 #endif
3267         }
3268     }
3269 
3270     if (info->end_data == 0) {
3271         info->start_data = info->end_code;
3272         info->end_data = info->end_code;
3273     }
3274 
3275     if (qemu_log_enabled()) {
3276         load_symbols(ehdr, image_fd, load_bias);
3277     }
3278 
3279     debuginfo_report_elf(image_name, image_fd, load_bias);
3280 
3281     mmap_unlock();
3282 
3283     close(image_fd);
3284     return;
3285 
3286  exit_read:
3287     if (retval >= 0) {
3288         error_setg(&err, "Incomplete read of file header");
3289     } else {
3290         error_setg_errno(&err, errno, "Error reading file header");
3291     }
3292     goto exit_errmsg;
3293  exit_mmap:
3294     error_setg_errno(&err, errno, "Error mapping file");
3295     goto exit_errmsg;
3296  exit_errmsg:
3297     error_reportf_err(err, "%s: ", image_name);
3298     exit(-1);
3299 }
3300 
3301 static void load_elf_interp(const char *filename, struct image_info *info,
3302                             char bprm_buf[BPRM_BUF_SIZE])
3303 {
3304     int fd, retval;
3305     Error *err = NULL;
3306 
3307     fd = open(path(filename), O_RDONLY);
3308     if (fd < 0) {
3309         error_setg_file_open(&err, errno, filename);
3310         error_report_err(err);
3311         exit(-1);
3312     }
3313 
3314     retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
3315     if (retval < 0) {
3316         error_setg_errno(&err, errno, "Error reading file header");
3317         error_reportf_err(err, "%s: ", filename);
3318         exit(-1);
3319     }
3320 
3321     if (retval < BPRM_BUF_SIZE) {
3322         memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
3323     }
3324 
3325     load_elf_image(filename, fd, info, NULL, bprm_buf);
3326 }
3327 
3328 static int symfind(const void *s0, const void *s1)
3329 {
3330     target_ulong addr = *(target_ulong *)s0;
3331     struct elf_sym *sym = (struct elf_sym *)s1;
3332     int result = 0;
3333     if (addr < sym->st_value) {
3334         result = -1;
3335     } else if (addr >= sym->st_value + sym->st_size) {
3336         result = 1;
3337     }
3338     return result;
3339 }
3340 
3341 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
3342 {
3343 #if ELF_CLASS == ELFCLASS32
3344     struct elf_sym *syms = s->disas_symtab.elf32;
3345 #else
3346     struct elf_sym *syms = s->disas_symtab.elf64;
3347 #endif
3348 
3349     // binary search
3350     struct elf_sym *sym;
3351 
3352     sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
3353     if (sym != NULL) {
3354         return s->disas_strtab + sym->st_name;
3355     }
3356 
3357     return "";
3358 }
3359 
3360 /* FIXME: This should use elf_ops.h  */
3361 static int symcmp(const void *s0, const void *s1)
3362 {
3363     struct elf_sym *sym0 = (struct elf_sym *)s0;
3364     struct elf_sym *sym1 = (struct elf_sym *)s1;
3365     return (sym0->st_value < sym1->st_value)
3366         ? -1
3367         : ((sym0->st_value > sym1->st_value) ? 1 : 0);
3368 }
3369 
3370 /* Best attempt to load symbols from this ELF object. */
3371 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
3372 {
3373     int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
3374     uint64_t segsz;
3375     struct elf_shdr *shdr;
3376     char *strings = NULL;
3377     struct syminfo *s = NULL;
3378     struct elf_sym *new_syms, *syms = NULL;
3379 
3380     shnum = hdr->e_shnum;
3381     i = shnum * sizeof(struct elf_shdr);
3382     shdr = (struct elf_shdr *)alloca(i);
3383     if (pread(fd, shdr, i, hdr->e_shoff) != i) {
3384         return;
3385     }
3386 
3387     bswap_shdr(shdr, shnum);
3388     for (i = 0; i < shnum; ++i) {
3389         if (shdr[i].sh_type == SHT_SYMTAB) {
3390             sym_idx = i;
3391             str_idx = shdr[i].sh_link;
3392             goto found;
3393         }
3394     }
3395 
3396     /* There will be no symbol table if the file was stripped.  */
3397     return;
3398 
3399  found:
3400     /* Now know where the strtab and symtab are.  Snarf them.  */
3401     s = g_try_new(struct syminfo, 1);
3402     if (!s) {
3403         goto give_up;
3404     }
3405 
3406     segsz = shdr[str_idx].sh_size;
3407     s->disas_strtab = strings = g_try_malloc(segsz);
3408     if (!strings ||
3409         pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) {
3410         goto give_up;
3411     }
3412 
3413     segsz = shdr[sym_idx].sh_size;
3414     syms = g_try_malloc(segsz);
3415     if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) {
3416         goto give_up;
3417     }
3418 
3419     if (segsz / sizeof(struct elf_sym) > INT_MAX) {
3420         /* Implausibly large symbol table: give up rather than ploughing
3421          * on with the number of symbols calculation overflowing
3422          */
3423         goto give_up;
3424     }
3425     nsyms = segsz / sizeof(struct elf_sym);
3426     for (i = 0; i < nsyms; ) {
3427         bswap_sym(syms + i);
3428         /* Throw away entries which we do not need.  */
3429         if (syms[i].st_shndx == SHN_UNDEF
3430             || syms[i].st_shndx >= SHN_LORESERVE
3431             || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
3432             if (i < --nsyms) {
3433                 syms[i] = syms[nsyms];
3434             }
3435         } else {
3436 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
3437             /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
3438             syms[i].st_value &= ~(target_ulong)1;
3439 #endif
3440             syms[i].st_value += load_bias;
3441             i++;
3442         }
3443     }
3444 
3445     /* No "useful" symbol.  */
3446     if (nsyms == 0) {
3447         goto give_up;
3448     }
3449 
3450     /* Attempt to free the storage associated with the local symbols
3451        that we threw away.  Whether or not this has any effect on the
3452        memory allocation depends on the malloc implementation and how
3453        many symbols we managed to discard.  */
3454     new_syms = g_try_renew(struct elf_sym, syms, nsyms);
3455     if (new_syms == NULL) {
3456         goto give_up;
3457     }
3458     syms = new_syms;
3459 
3460     qsort(syms, nsyms, sizeof(*syms), symcmp);
3461 
3462     s->disas_num_syms = nsyms;
3463 #if ELF_CLASS == ELFCLASS32
3464     s->disas_symtab.elf32 = syms;
3465 #else
3466     s->disas_symtab.elf64 = syms;
3467 #endif
3468     s->lookup_symbol = lookup_symbolxx;
3469     s->next = syminfos;
3470     syminfos = s;
3471 
3472     return;
3473 
3474 give_up:
3475     g_free(s);
3476     g_free(strings);
3477     g_free(syms);
3478 }
3479 
3480 uint32_t get_elf_eflags(int fd)
3481 {
3482     struct elfhdr ehdr;
3483     off_t offset;
3484     int ret;
3485 
3486     /* Read ELF header */
3487     offset = lseek(fd, 0, SEEK_SET);
3488     if (offset == (off_t) -1) {
3489         return 0;
3490     }
3491     ret = read(fd, &ehdr, sizeof(ehdr));
3492     if (ret < sizeof(ehdr)) {
3493         return 0;
3494     }
3495     offset = lseek(fd, offset, SEEK_SET);
3496     if (offset == (off_t) -1) {
3497         return 0;
3498     }
3499 
3500     /* Check ELF signature */
3501     if (!elf_check_ident(&ehdr)) {
3502         return 0;
3503     }
3504 
3505     /* check header */
3506     bswap_ehdr(&ehdr);
3507     if (!elf_check_ehdr(&ehdr)) {
3508         return 0;
3509     }
3510 
3511     /* return architecture id */
3512     return ehdr.e_flags;
3513 }
3514 
3515 int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
3516 {
3517     struct image_info interp_info;
3518     struct elfhdr elf_ex;
3519     char *elf_interpreter = NULL;
3520     char *scratch;
3521 
3522     memset(&interp_info, 0, sizeof(interp_info));
3523 #ifdef TARGET_MIPS
3524     interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
3525 #endif
3526 
3527     info->start_mmap = (abi_ulong)ELF_START_MMAP;
3528 
3529     load_elf_image(bprm->filename, bprm->fd, info,
3530                    &elf_interpreter, bprm->buf);
3531 
3532     /* ??? We need a copy of the elf header for passing to create_elf_tables.
3533        If we do nothing, we'll have overwritten this when we re-use bprm->buf
3534        when we load the interpreter.  */
3535     elf_ex = *(struct elfhdr *)bprm->buf;
3536 
3537     /* Do this so that we can load the interpreter, if need be.  We will
3538        change some of these later */
3539     bprm->p = setup_arg_pages(bprm, info);
3540 
3541     scratch = g_new0(char, TARGET_PAGE_SIZE);
3542     if (STACK_GROWS_DOWN) {
3543         bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
3544                                    bprm->p, info->stack_limit);
3545         info->file_string = bprm->p;
3546         bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
3547                                    bprm->p, info->stack_limit);
3548         info->env_strings = bprm->p;
3549         bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
3550                                    bprm->p, info->stack_limit);
3551         info->arg_strings = bprm->p;
3552     } else {
3553         info->arg_strings = bprm->p;
3554         bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
3555                                    bprm->p, info->stack_limit);
3556         info->env_strings = bprm->p;
3557         bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
3558                                    bprm->p, info->stack_limit);
3559         info->file_string = bprm->p;
3560         bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
3561                                    bprm->p, info->stack_limit);
3562     }
3563 
3564     g_free(scratch);
3565 
3566     if (!bprm->p) {
3567         fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
3568         exit(-1);
3569     }
3570 
3571     if (elf_interpreter) {
3572         load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
3573 
3574         /* If the program interpreter is one of these two, then assume
3575            an iBCS2 image.  Otherwise assume a native linux image.  */
3576 
3577         if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
3578             || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
3579             info->personality = PER_SVR4;
3580 
3581             /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
3582                and some applications "depend" upon this behavior.  Since
3583                we do not have the power to recompile these, we emulate
3584                the SVr4 behavior.  Sigh.  */
3585             target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
3586                         MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
3587         }
3588 #ifdef TARGET_MIPS
3589         info->interp_fp_abi = interp_info.fp_abi;
3590 #endif
3591     }
3592 
3593     /*
3594      * TODO: load a vdso, which would also contain the signal trampolines.
3595      * Otherwise, allocate a private page to hold them.
3596      */
3597     if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) {
3598         abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE,
3599                                           PROT_READ | PROT_WRITE,
3600                                           MAP_PRIVATE | MAP_ANON, -1, 0);
3601         if (tramp_page == -1) {
3602             return -errno;
3603         }
3604 
3605         setup_sigtramp(tramp_page);
3606         target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC);
3607     }
3608 
3609     bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
3610                                 info, (elf_interpreter ? &interp_info : NULL));
3611     info->start_stack = bprm->p;
3612 
3613     /* If we have an interpreter, set that as the program's entry point.
3614        Copy the load_bias as well, to help PPC64 interpret the entry
3615        point as a function descriptor.  Do this after creating elf tables
3616        so that we copy the original program entry point into the AUXV.  */
3617     if (elf_interpreter) {
3618         info->load_bias = interp_info.load_bias;
3619         info->entry = interp_info.entry;
3620         g_free(elf_interpreter);
3621     }
3622 
3623 #ifdef USE_ELF_CORE_DUMP
3624     bprm->core_dump = &elf_core_dump;
3625 #endif
3626 
3627     /*
3628      * If we reserved extra space for brk, release it now.
3629      * The implementation of do_brk in syscalls.c expects to be able
3630      * to mmap pages in this space.
3631      */
3632     if (info->reserve_brk) {
3633         abi_ulong start_brk = HOST_PAGE_ALIGN(info->brk);
3634         abi_ulong end_brk = HOST_PAGE_ALIGN(info->brk + info->reserve_brk);
3635         target_munmap(start_brk, end_brk - start_brk);
3636     }
3637 
3638     return 0;
3639 }
3640 
3641 #ifdef USE_ELF_CORE_DUMP
3642 /*
3643  * Definitions to generate Intel SVR4-like core files.
3644  * These mostly have the same names as the SVR4 types with "target_elf_"
3645  * tacked on the front to prevent clashes with linux definitions,
3646  * and the typedef forms have been avoided.  This is mostly like
3647  * the SVR4 structure, but more Linuxy, with things that Linux does
3648  * not support and which gdb doesn't really use excluded.
3649  *
3650  * Fields we don't dump (their contents is zero) in linux-user qemu
3651  * are marked with XXX.
3652  *
3653  * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
3654  *
3655  * Porting ELF coredump for target is (quite) simple process.  First you
3656  * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for
3657  * the target resides):
3658  *
3659  * #define USE_ELF_CORE_DUMP
3660  *
3661  * Next you define type of register set used for dumping.  ELF specification
3662  * says that it needs to be array of elf_greg_t that has size of ELF_NREG.
3663  *
3664  * typedef <target_regtype> target_elf_greg_t;
3665  * #define ELF_NREG <number of registers>
3666  * typedef taret_elf_greg_t target_elf_gregset_t[ELF_NREG];
3667  *
3668  * Last step is to implement target specific function that copies registers
3669  * from given cpu into just specified register set.  Prototype is:
3670  *
3671  * static void elf_core_copy_regs(taret_elf_gregset_t *regs,
3672  *                                const CPUArchState *env);
3673  *
3674  * Parameters:
3675  *     regs - copy register values into here (allocated and zeroed by caller)
3676  *     env - copy registers from here
3677  *
3678  * Example for ARM target is provided in this file.
3679  */
3680 
3681 /* An ELF note in memory */
3682 struct memelfnote {
3683     const char *name;
3684     size_t     namesz;
3685     size_t     namesz_rounded;
3686     int        type;
3687     size_t     datasz;
3688     size_t     datasz_rounded;
3689     void       *data;
3690     size_t     notesz;
3691 };
3692 
3693 struct target_elf_siginfo {
3694     abi_int    si_signo; /* signal number */
3695     abi_int    si_code;  /* extra code */
3696     abi_int    si_errno; /* errno */
3697 };
3698 
3699 struct target_elf_prstatus {
3700     struct target_elf_siginfo pr_info;      /* Info associated with signal */
3701     abi_short          pr_cursig;    /* Current signal */
3702     abi_ulong          pr_sigpend;   /* XXX */
3703     abi_ulong          pr_sighold;   /* XXX */
3704     target_pid_t       pr_pid;
3705     target_pid_t       pr_ppid;
3706     target_pid_t       pr_pgrp;
3707     target_pid_t       pr_sid;
3708     struct target_timeval pr_utime;  /* XXX User time */
3709     struct target_timeval pr_stime;  /* XXX System time */
3710     struct target_timeval pr_cutime; /* XXX Cumulative user time */
3711     struct target_timeval pr_cstime; /* XXX Cumulative system time */
3712     target_elf_gregset_t      pr_reg;       /* GP registers */
3713     abi_int            pr_fpvalid;   /* XXX */
3714 };
3715 
3716 #define ELF_PRARGSZ     (80) /* Number of chars for args */
3717 
3718 struct target_elf_prpsinfo {
3719     char         pr_state;       /* numeric process state */
3720     char         pr_sname;       /* char for pr_state */
3721     char         pr_zomb;        /* zombie */
3722     char         pr_nice;        /* nice val */
3723     abi_ulong    pr_flag;        /* flags */
3724     target_uid_t pr_uid;
3725     target_gid_t pr_gid;
3726     target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
3727     /* Lots missing */
3728     char    pr_fname[16] QEMU_NONSTRING; /* filename of executable */
3729     char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
3730 };
3731 
3732 /* Here is the structure in which status of each thread is captured. */
3733 struct elf_thread_status {
3734     QTAILQ_ENTRY(elf_thread_status)  ets_link;
3735     struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
3736 #if 0
3737     elf_fpregset_t fpu;             /* NT_PRFPREG */
3738     struct task_struct *thread;
3739     elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
3740 #endif
3741     struct memelfnote notes[1];
3742     int num_notes;
3743 };
3744 
3745 struct elf_note_info {
3746     struct memelfnote   *notes;
3747     struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
3748     struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
3749 
3750     QTAILQ_HEAD(, elf_thread_status) thread_list;
3751 #if 0
3752     /*
3753      * Current version of ELF coredump doesn't support
3754      * dumping fp regs etc.
3755      */
3756     elf_fpregset_t *fpu;
3757     elf_fpxregset_t *xfpu;
3758     int thread_status_size;
3759 #endif
3760     int notes_size;
3761     int numnote;
3762 };
3763 
3764 struct vm_area_struct {
3765     target_ulong   vma_start;  /* start vaddr of memory region */
3766     target_ulong   vma_end;    /* end vaddr of memory region */
3767     abi_ulong      vma_flags;  /* protection etc. flags for the region */
3768     QTAILQ_ENTRY(vm_area_struct) vma_link;
3769 };
3770 
3771 struct mm_struct {
3772     QTAILQ_HEAD(, vm_area_struct) mm_mmap;
3773     int mm_count;           /* number of mappings */
3774 };
3775 
3776 static struct mm_struct *vma_init(void);
3777 static void vma_delete(struct mm_struct *);
3778 static int vma_add_mapping(struct mm_struct *, target_ulong,
3779                            target_ulong, abi_ulong);
3780 static int vma_get_mapping_count(const struct mm_struct *);
3781 static struct vm_area_struct *vma_first(const struct mm_struct *);
3782 static struct vm_area_struct *vma_next(struct vm_area_struct *);
3783 static abi_ulong vma_dump_size(const struct vm_area_struct *);
3784 static int vma_walker(void *priv, target_ulong start, target_ulong end,
3785                       unsigned long flags);
3786 
3787 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
3788 static void fill_note(struct memelfnote *, const char *, int,
3789                       unsigned int, void *);
3790 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
3791 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
3792 static void fill_auxv_note(struct memelfnote *, const TaskState *);
3793 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
3794 static size_t note_size(const struct memelfnote *);
3795 static void free_note_info(struct elf_note_info *);
3796 static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
3797 static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
3798 
3799 static int dump_write(int, const void *, size_t);
3800 static int write_note(struct memelfnote *, int);
3801 static int write_note_info(struct elf_note_info *, int);
3802 
3803 #ifdef BSWAP_NEEDED
3804 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
3805 {
3806     prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
3807     prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
3808     prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
3809     prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
3810     prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend);
3811     prstatus->pr_sighold = tswapal(prstatus->pr_sighold);
3812     prstatus->pr_pid = tswap32(prstatus->pr_pid);
3813     prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
3814     prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
3815     prstatus->pr_sid = tswap32(prstatus->pr_sid);
3816     /* cpu times are not filled, so we skip them */
3817     /* regs should be in correct format already */
3818     prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
3819 }
3820 
3821 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
3822 {
3823     psinfo->pr_flag = tswapal(psinfo->pr_flag);
3824     psinfo->pr_uid = tswap16(psinfo->pr_uid);
3825     psinfo->pr_gid = tswap16(psinfo->pr_gid);
3826     psinfo->pr_pid = tswap32(psinfo->pr_pid);
3827     psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
3828     psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
3829     psinfo->pr_sid = tswap32(psinfo->pr_sid);
3830 }
3831 
3832 static void bswap_note(struct elf_note *en)
3833 {
3834     bswap32s(&en->n_namesz);
3835     bswap32s(&en->n_descsz);
3836     bswap32s(&en->n_type);
3837 }
3838 #else
3839 static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
3840 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
3841 static inline void bswap_note(struct elf_note *en) { }
3842 #endif /* BSWAP_NEEDED */
3843 
3844 /*
3845  * Minimal support for linux memory regions.  These are needed
3846  * when we are finding out what memory exactly belongs to
3847  * emulated process.  No locks needed here, as long as
3848  * thread that received the signal is stopped.
3849  */
3850 
3851 static struct mm_struct *vma_init(void)
3852 {
3853     struct mm_struct *mm;
3854 
3855     if ((mm = g_malloc(sizeof (*mm))) == NULL)
3856         return (NULL);
3857 
3858     mm->mm_count = 0;
3859     QTAILQ_INIT(&mm->mm_mmap);
3860 
3861     return (mm);
3862 }
3863 
3864 static void vma_delete(struct mm_struct *mm)
3865 {
3866     struct vm_area_struct *vma;
3867 
3868     while ((vma = vma_first(mm)) != NULL) {
3869         QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
3870         g_free(vma);
3871     }
3872     g_free(mm);
3873 }
3874 
3875 static int vma_add_mapping(struct mm_struct *mm, target_ulong start,
3876                            target_ulong end, abi_ulong flags)
3877 {
3878     struct vm_area_struct *vma;
3879 
3880     if ((vma = g_malloc0(sizeof (*vma))) == NULL)
3881         return (-1);
3882 
3883     vma->vma_start = start;
3884     vma->vma_end = end;
3885     vma->vma_flags = flags;
3886 
3887     QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
3888     mm->mm_count++;
3889 
3890     return (0);
3891 }
3892 
3893 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
3894 {
3895     return (QTAILQ_FIRST(&mm->mm_mmap));
3896 }
3897 
3898 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
3899 {
3900     return (QTAILQ_NEXT(vma, vma_link));
3901 }
3902 
3903 static int vma_get_mapping_count(const struct mm_struct *mm)
3904 {
3905     return (mm->mm_count);
3906 }
3907 
3908 /*
3909  * Calculate file (dump) size of given memory region.
3910  */
3911 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
3912 {
3913     /* if we cannot even read the first page, skip it */
3914     if (!access_ok_untagged(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
3915         return (0);
3916 
3917     /*
3918      * Usually we don't dump executable pages as they contain
3919      * non-writable code that debugger can read directly from
3920      * target library etc.  However, thread stacks are marked
3921      * also executable so we read in first page of given region
3922      * and check whether it contains elf header.  If there is
3923      * no elf header, we dump it.
3924      */
3925     if (vma->vma_flags & PROT_EXEC) {
3926         char page[TARGET_PAGE_SIZE];
3927 
3928         if (copy_from_user(page, vma->vma_start, sizeof (page))) {
3929             return 0;
3930         }
3931         if ((page[EI_MAG0] == ELFMAG0) &&
3932             (page[EI_MAG1] == ELFMAG1) &&
3933             (page[EI_MAG2] == ELFMAG2) &&
3934             (page[EI_MAG3] == ELFMAG3)) {
3935             /*
3936              * Mappings are possibly from ELF binary.  Don't dump
3937              * them.
3938              */
3939             return (0);
3940         }
3941     }
3942 
3943     return (vma->vma_end - vma->vma_start);
3944 }
3945 
3946 static int vma_walker(void *priv, target_ulong start, target_ulong end,
3947                       unsigned long flags)
3948 {
3949     struct mm_struct *mm = (struct mm_struct *)priv;
3950 
3951     vma_add_mapping(mm, start, end, flags);
3952     return (0);
3953 }
3954 
3955 static void fill_note(struct memelfnote *note, const char *name, int type,
3956                       unsigned int sz, void *data)
3957 {
3958     unsigned int namesz;
3959 
3960     namesz = strlen(name) + 1;
3961     note->name = name;
3962     note->namesz = namesz;
3963     note->namesz_rounded = roundup(namesz, sizeof (int32_t));
3964     note->type = type;
3965     note->datasz = sz;
3966     note->datasz_rounded = roundup(sz, sizeof (int32_t));
3967 
3968     note->data = data;
3969 
3970     /*
3971      * We calculate rounded up note size here as specified by
3972      * ELF document.
3973      */
3974     note->notesz = sizeof (struct elf_note) +
3975         note->namesz_rounded + note->datasz_rounded;
3976 }
3977 
3978 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
3979                             uint32_t flags)
3980 {
3981     (void) memset(elf, 0, sizeof(*elf));
3982 
3983     (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
3984     elf->e_ident[EI_CLASS] = ELF_CLASS;
3985     elf->e_ident[EI_DATA] = ELF_DATA;
3986     elf->e_ident[EI_VERSION] = EV_CURRENT;
3987     elf->e_ident[EI_OSABI] = ELF_OSABI;
3988 
3989     elf->e_type = ET_CORE;
3990     elf->e_machine = machine;
3991     elf->e_version = EV_CURRENT;
3992     elf->e_phoff = sizeof(struct elfhdr);
3993     elf->e_flags = flags;
3994     elf->e_ehsize = sizeof(struct elfhdr);
3995     elf->e_phentsize = sizeof(struct elf_phdr);
3996     elf->e_phnum = segs;
3997 
3998     bswap_ehdr(elf);
3999 }
4000 
4001 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
4002 {
4003     phdr->p_type = PT_NOTE;
4004     phdr->p_offset = offset;
4005     phdr->p_vaddr = 0;
4006     phdr->p_paddr = 0;
4007     phdr->p_filesz = sz;
4008     phdr->p_memsz = 0;
4009     phdr->p_flags = 0;
4010     phdr->p_align = 0;
4011 
4012     bswap_phdr(phdr, 1);
4013 }
4014 
4015 static size_t note_size(const struct memelfnote *note)
4016 {
4017     return (note->notesz);
4018 }
4019 
4020 static void fill_prstatus(struct target_elf_prstatus *prstatus,
4021                           const TaskState *ts, int signr)
4022 {
4023     (void) memset(prstatus, 0, sizeof (*prstatus));
4024     prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
4025     prstatus->pr_pid = ts->ts_tid;
4026     prstatus->pr_ppid = getppid();
4027     prstatus->pr_pgrp = getpgrp();
4028     prstatus->pr_sid = getsid(0);
4029 
4030     bswap_prstatus(prstatus);
4031 }
4032 
4033 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
4034 {
4035     char *base_filename;
4036     unsigned int i, len;
4037 
4038     (void) memset(psinfo, 0, sizeof (*psinfo));
4039 
4040     len = ts->info->env_strings - ts->info->arg_strings;
4041     if (len >= ELF_PRARGSZ)
4042         len = ELF_PRARGSZ - 1;
4043     if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_strings, len)) {
4044         return -EFAULT;
4045     }
4046     for (i = 0; i < len; i++)
4047         if (psinfo->pr_psargs[i] == 0)
4048             psinfo->pr_psargs[i] = ' ';
4049     psinfo->pr_psargs[len] = 0;
4050 
4051     psinfo->pr_pid = getpid();
4052     psinfo->pr_ppid = getppid();
4053     psinfo->pr_pgrp = getpgrp();
4054     psinfo->pr_sid = getsid(0);
4055     psinfo->pr_uid = getuid();
4056     psinfo->pr_gid = getgid();
4057 
4058     base_filename = g_path_get_basename(ts->bprm->filename);
4059     /*
4060      * Using strncpy here is fine: at max-length,
4061      * this field is not NUL-terminated.
4062      */
4063     (void) strncpy(psinfo->pr_fname, base_filename,
4064                    sizeof(psinfo->pr_fname));
4065 
4066     g_free(base_filename);
4067     bswap_psinfo(psinfo);
4068     return (0);
4069 }
4070 
4071 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
4072 {
4073     elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
4074     elf_addr_t orig_auxv = auxv;
4075     void *ptr;
4076     int len = ts->info->auxv_len;
4077 
4078     /*
4079      * Auxiliary vector is stored in target process stack.  It contains
4080      * {type, value} pairs that we need to dump into note.  This is not
4081      * strictly necessary but we do it here for sake of completeness.
4082      */
4083 
4084     /* read in whole auxv vector and copy it to memelfnote */
4085     ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
4086     if (ptr != NULL) {
4087         fill_note(note, "CORE", NT_AUXV, len, ptr);
4088         unlock_user(ptr, auxv, len);
4089     }
4090 }
4091 
4092 /*
4093  * Constructs name of coredump file.  We have following convention
4094  * for the name:
4095  *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
4096  *
4097  * Returns the filename
4098  */
4099 static char *core_dump_filename(const TaskState *ts)
4100 {
4101     g_autoptr(GDateTime) now = g_date_time_new_now_local();
4102     g_autofree char *nowstr = g_date_time_format(now, "%Y%m%d-%H%M%S");
4103     g_autofree char *base_filename = g_path_get_basename(ts->bprm->filename);
4104 
4105     return g_strdup_printf("qemu_%s_%s_%d.core",
4106                            base_filename, nowstr, (int)getpid());
4107 }
4108 
4109 static int dump_write(int fd, const void *ptr, size_t size)
4110 {
4111     const char *bufp = (const char *)ptr;
4112     ssize_t bytes_written, bytes_left;
4113     struct rlimit dumpsize;
4114     off_t pos;
4115 
4116     bytes_written = 0;
4117     getrlimit(RLIMIT_CORE, &dumpsize);
4118     if ((pos = lseek(fd, 0, SEEK_CUR))==-1) {
4119         if (errno == ESPIPE) { /* not a seekable stream */
4120             bytes_left = size;
4121         } else {
4122             return pos;
4123         }
4124     } else {
4125         if (dumpsize.rlim_cur <= pos) {
4126             return -1;
4127         } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
4128             bytes_left = size;
4129         } else {
4130             size_t limit_left=dumpsize.rlim_cur - pos;
4131             bytes_left = limit_left >= size ? size : limit_left ;
4132         }
4133     }
4134 
4135     /*
4136      * In normal conditions, single write(2) should do but
4137      * in case of socket etc. this mechanism is more portable.
4138      */
4139     do {
4140         bytes_written = write(fd, bufp, bytes_left);
4141         if (bytes_written < 0) {
4142             if (errno == EINTR)
4143                 continue;
4144             return (-1);
4145         } else if (bytes_written == 0) { /* eof */
4146             return (-1);
4147         }
4148         bufp += bytes_written;
4149         bytes_left -= bytes_written;
4150     } while (bytes_left > 0);
4151 
4152     return (0);
4153 }
4154 
4155 static int write_note(struct memelfnote *men, int fd)
4156 {
4157     struct elf_note en;
4158 
4159     en.n_namesz = men->namesz;
4160     en.n_type = men->type;
4161     en.n_descsz = men->datasz;
4162 
4163     bswap_note(&en);
4164 
4165     if (dump_write(fd, &en, sizeof(en)) != 0)
4166         return (-1);
4167     if (dump_write(fd, men->name, men->namesz_rounded) != 0)
4168         return (-1);
4169     if (dump_write(fd, men->data, men->datasz_rounded) != 0)
4170         return (-1);
4171 
4172     return (0);
4173 }
4174 
4175 static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
4176 {
4177     CPUState *cpu = env_cpu((CPUArchState *)env);
4178     TaskState *ts = (TaskState *)cpu->opaque;
4179     struct elf_thread_status *ets;
4180 
4181     ets = g_malloc0(sizeof (*ets));
4182     ets->num_notes = 1; /* only prstatus is dumped */
4183     fill_prstatus(&ets->prstatus, ts, 0);
4184     elf_core_copy_regs(&ets->prstatus.pr_reg, env);
4185     fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
4186               &ets->prstatus);
4187 
4188     QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
4189 
4190     info->notes_size += note_size(&ets->notes[0]);
4191 }
4192 
4193 static void init_note_info(struct elf_note_info *info)
4194 {
4195     /* Initialize the elf_note_info structure so that it is at
4196      * least safe to call free_note_info() on it. Must be
4197      * called before calling fill_note_info().
4198      */
4199     memset(info, 0, sizeof (*info));
4200     QTAILQ_INIT(&info->thread_list);
4201 }
4202 
4203 static int fill_note_info(struct elf_note_info *info,
4204                           long signr, const CPUArchState *env)
4205 {
4206 #define NUMNOTES 3
4207     CPUState *cpu = env_cpu((CPUArchState *)env);
4208     TaskState *ts = (TaskState *)cpu->opaque;
4209     int i;
4210 
4211     info->notes = g_new0(struct memelfnote, NUMNOTES);
4212     if (info->notes == NULL)
4213         return (-ENOMEM);
4214     info->prstatus = g_malloc0(sizeof (*info->prstatus));
4215     if (info->prstatus == NULL)
4216         return (-ENOMEM);
4217     info->psinfo = g_malloc0(sizeof (*info->psinfo));
4218     if (info->prstatus == NULL)
4219         return (-ENOMEM);
4220 
4221     /*
4222      * First fill in status (and registers) of current thread
4223      * including process info & aux vector.
4224      */
4225     fill_prstatus(info->prstatus, ts, signr);
4226     elf_core_copy_regs(&info->prstatus->pr_reg, env);
4227     fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
4228               sizeof (*info->prstatus), info->prstatus);
4229     fill_psinfo(info->psinfo, ts);
4230     fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
4231               sizeof (*info->psinfo), info->psinfo);
4232     fill_auxv_note(&info->notes[2], ts);
4233     info->numnote = 3;
4234 
4235     info->notes_size = 0;
4236     for (i = 0; i < info->numnote; i++)
4237         info->notes_size += note_size(&info->notes[i]);
4238 
4239     /* read and fill status of all threads */
4240     cpu_list_lock();
4241     CPU_FOREACH(cpu) {
4242         if (cpu == thread_cpu) {
4243             continue;
4244         }
4245         fill_thread_info(info, cpu->env_ptr);
4246     }
4247     cpu_list_unlock();
4248 
4249     return (0);
4250 }
4251 
4252 static void free_note_info(struct elf_note_info *info)
4253 {
4254     struct elf_thread_status *ets;
4255 
4256     while (!QTAILQ_EMPTY(&info->thread_list)) {
4257         ets = QTAILQ_FIRST(&info->thread_list);
4258         QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
4259         g_free(ets);
4260     }
4261 
4262     g_free(info->prstatus);
4263     g_free(info->psinfo);
4264     g_free(info->notes);
4265 }
4266 
4267 static int write_note_info(struct elf_note_info *info, int fd)
4268 {
4269     struct elf_thread_status *ets;
4270     int i, error = 0;
4271 
4272     /* write prstatus, psinfo and auxv for current thread */
4273     for (i = 0; i < info->numnote; i++)
4274         if ((error = write_note(&info->notes[i], fd)) != 0)
4275             return (error);
4276 
4277     /* write prstatus for each thread */
4278     QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
4279         if ((error = write_note(&ets->notes[0], fd)) != 0)
4280             return (error);
4281     }
4282 
4283     return (0);
4284 }
4285 
4286 /*
4287  * Write out ELF coredump.
4288  *
4289  * See documentation of ELF object file format in:
4290  * http://www.caldera.com/developers/devspecs/gabi41.pdf
4291  *
4292  * Coredump format in linux is following:
4293  *
4294  * 0   +----------------------+         \
4295  *     | ELF header           | ET_CORE  |
4296  *     +----------------------+          |
4297  *     | ELF program headers  |          |--- headers
4298  *     | - NOTE section       |          |
4299  *     | - PT_LOAD sections   |          |
4300  *     +----------------------+         /
4301  *     | NOTEs:               |
4302  *     | - NT_PRSTATUS        |
4303  *     | - NT_PRSINFO         |
4304  *     | - NT_AUXV            |
4305  *     +----------------------+ <-- aligned to target page
4306  *     | Process memory dump  |
4307  *     :                      :
4308  *     .                      .
4309  *     :                      :
4310  *     |                      |
4311  *     +----------------------+
4312  *
4313  * NT_PRSTATUS -> struct elf_prstatus (per thread)
4314  * NT_PRSINFO  -> struct elf_prpsinfo
4315  * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
4316  *
4317  * Format follows System V format as close as possible.  Current
4318  * version limitations are as follows:
4319  *     - no floating point registers are dumped
4320  *
4321  * Function returns 0 in case of success, negative errno otherwise.
4322  *
4323  * TODO: make this work also during runtime: it should be
4324  * possible to force coredump from running process and then
4325  * continue processing.  For example qemu could set up SIGUSR2
4326  * handler (provided that target process haven't registered
4327  * handler for that) that does the dump when signal is received.
4328  */
4329 static int elf_core_dump(int signr, const CPUArchState *env)
4330 {
4331     const CPUState *cpu = env_cpu((CPUArchState *)env);
4332     const TaskState *ts = (const TaskState *)cpu->opaque;
4333     struct vm_area_struct *vma = NULL;
4334     g_autofree char *corefile = NULL;
4335     struct elf_note_info info;
4336     struct elfhdr elf;
4337     struct elf_phdr phdr;
4338     struct rlimit dumpsize;
4339     struct mm_struct *mm = NULL;
4340     off_t offset = 0, data_offset = 0;
4341     int segs = 0;
4342     int fd = -1;
4343 
4344     init_note_info(&info);
4345 
4346     errno = 0;
4347     getrlimit(RLIMIT_CORE, &dumpsize);
4348     if (dumpsize.rlim_cur == 0)
4349         return 0;
4350 
4351     corefile = core_dump_filename(ts);
4352 
4353     if ((fd = open(corefile, O_WRONLY | O_CREAT,
4354                    S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
4355         return (-errno);
4356 
4357     /*
4358      * Walk through target process memory mappings and
4359      * set up structure containing this information.  After
4360      * this point vma_xxx functions can be used.
4361      */
4362     if ((mm = vma_init()) == NULL)
4363         goto out;
4364 
4365     walk_memory_regions(mm, vma_walker);
4366     segs = vma_get_mapping_count(mm);
4367 
4368     /*
4369      * Construct valid coredump ELF header.  We also
4370      * add one more segment for notes.
4371      */
4372     fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
4373     if (dump_write(fd, &elf, sizeof (elf)) != 0)
4374         goto out;
4375 
4376     /* fill in the in-memory version of notes */
4377     if (fill_note_info(&info, signr, env) < 0)
4378         goto out;
4379 
4380     offset += sizeof (elf);                             /* elf header */
4381     offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
4382 
4383     /* write out notes program header */
4384     fill_elf_note_phdr(&phdr, info.notes_size, offset);
4385 
4386     offset += info.notes_size;
4387     if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
4388         goto out;
4389 
4390     /*
4391      * ELF specification wants data to start at page boundary so
4392      * we align it here.
4393      */
4394     data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
4395 
4396     /*
4397      * Write program headers for memory regions mapped in
4398      * the target process.
4399      */
4400     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
4401         (void) memset(&phdr, 0, sizeof (phdr));
4402 
4403         phdr.p_type = PT_LOAD;
4404         phdr.p_offset = offset;
4405         phdr.p_vaddr = vma->vma_start;
4406         phdr.p_paddr = 0;
4407         phdr.p_filesz = vma_dump_size(vma);
4408         offset += phdr.p_filesz;
4409         phdr.p_memsz = vma->vma_end - vma->vma_start;
4410         phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
4411         if (vma->vma_flags & PROT_WRITE)
4412             phdr.p_flags |= PF_W;
4413         if (vma->vma_flags & PROT_EXEC)
4414             phdr.p_flags |= PF_X;
4415         phdr.p_align = ELF_EXEC_PAGESIZE;
4416 
4417         bswap_phdr(&phdr, 1);
4418         if (dump_write(fd, &phdr, sizeof(phdr)) != 0) {
4419             goto out;
4420         }
4421     }
4422 
4423     /*
4424      * Next we write notes just after program headers.  No
4425      * alignment needed here.
4426      */
4427     if (write_note_info(&info, fd) < 0)
4428         goto out;
4429 
4430     /* align data to page boundary */
4431     if (lseek(fd, data_offset, SEEK_SET) != data_offset)
4432         goto out;
4433 
4434     /*
4435      * Finally we can dump process memory into corefile as well.
4436      */
4437     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
4438         abi_ulong addr;
4439         abi_ulong end;
4440 
4441         end = vma->vma_start + vma_dump_size(vma);
4442 
4443         for (addr = vma->vma_start; addr < end;
4444              addr += TARGET_PAGE_SIZE) {
4445             char page[TARGET_PAGE_SIZE];
4446             int error;
4447 
4448             /*
4449              *  Read in page from target process memory and
4450              *  write it to coredump file.
4451              */
4452             error = copy_from_user(page, addr, sizeof (page));
4453             if (error != 0) {
4454                 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
4455                                addr);
4456                 errno = -error;
4457                 goto out;
4458             }
4459             if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
4460                 goto out;
4461         }
4462     }
4463 
4464  out:
4465     free_note_info(&info);
4466     if (mm != NULL)
4467         vma_delete(mm);
4468     (void) close(fd);
4469 
4470     if (errno != 0)
4471         return (-errno);
4472     return (0);
4473 }
4474 #endif /* USE_ELF_CORE_DUMP */
4475 
4476 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
4477 {
4478     init_thread(regs, infop);
4479 }
4480