xref: /openbmc/qemu/linux-user/elfload.c (revision 01afdadc)
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4 
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15 
16 #include "qemu.h"
17 #include "disas.h"
18 
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27 
28 #define ELF_OSABI   ELFOSABI_SYSV
29 
30 /* from personality.h */
31 
32 /*
33  * Flags for bug emulation.
34  *
35  * These occupy the top three bytes.
36  */
37 enum {
38     ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
39     FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
40                                            descriptors (signal handling) */
41     MMAP_PAGE_ZERO =    0x0100000,
42     ADDR_COMPAT_LAYOUT = 0x0200000,
43     READ_IMPLIES_EXEC = 0x0400000,
44     ADDR_LIMIT_32BIT =  0x0800000,
45     SHORT_INODE =       0x1000000,
46     WHOLE_SECONDS =     0x2000000,
47     STICKY_TIMEOUTS =   0x4000000,
48     ADDR_LIMIT_3GB =    0x8000000,
49 };
50 
51 /*
52  * Personality types.
53  *
54  * These go in the low byte.  Avoid using the top bit, it will
55  * conflict with error returns.
56  */
57 enum {
58     PER_LINUX =         0x0000,
59     PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
60     PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
61     PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
62     PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
63     PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
64     PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
65     PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
66     PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
67     PER_BSD =           0x0006,
68     PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
69     PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
70     PER_LINUX32 =       0x0008,
71     PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
72     PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
73     PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
74     PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
75     PER_RISCOS =        0x000c,
76     PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
77     PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
78     PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
79     PER_HPUX =          0x0010,
80     PER_MASK =          0x00ff,
81 };
82 
83 /*
84  * Return the base personality without flags.
85  */
86 #define personality(pers)       (pers & PER_MASK)
87 
88 /* This flag is ineffective under Linux too; it should be deleted. */
89 #ifndef MAP_DENYWRITE
90 #define MAP_DENYWRITE 0
91 #endif
92 
93 /* should probably go in elf.h */
94 #ifndef ELIBBAD
95 #define ELIBBAD 80
96 #endif
97 
98 #ifdef TARGET_WORDS_BIGENDIAN
99 #define ELF_DATA        ELFDATA2MSB
100 #else
101 #define ELF_DATA        ELFDATA2LSB
102 #endif
103 
104 typedef target_ulong    target_elf_greg_t;
105 #ifdef USE_UID16
106 typedef target_ushort   target_uid_t;
107 typedef target_ushort   target_gid_t;
108 #else
109 typedef target_uint     target_uid_t;
110 typedef target_uint     target_gid_t;
111 #endif
112 typedef target_int      target_pid_t;
113 
114 #ifdef TARGET_I386
115 
116 #define ELF_PLATFORM get_elf_platform()
117 
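/*
 * Build the AT_PLATFORM string from the emulated CPUID family: families
 * 3..6 yield "i386".."i686", and anything newer is reported as "i686".
 */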
118 static const char *get_elf_platform(void)
119 {
120     static char elf_platform[] = "i386";
121     int family = (thread_env->cpuid_version >> 8) & 0xff;
122     if (family > 6)
123         family = 6;
124     if (family >= 3)
125         elf_platform[1] = '0' + family;
126     return elf_platform;
127 }
128 
129 #define ELF_HWCAP get_elf_hwcap()
130 
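/*
 * AT_HWCAP on x86 carries the CPUID.1 EDX feature bits, so hand back the
 * emulated CPU's feature word directly.
 */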
131 static uint32_t get_elf_hwcap(void)
132 {
133     return thread_env->cpuid_features;
134 }
135 
136 #ifdef TARGET_X86_64
137 #define ELF_START_MMAP 0x2aaaaab000ULL
138 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
139 
140 #define ELF_CLASS      ELFCLASS64
141 #define ELF_ARCH       EM_X86_64
142 
143 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
144 {
145     regs->rax = 0;
146     regs->rsp = infop->start_stack;
147     regs->rip = infop->entry;
148 }
149 
150 #define ELF_NREG    27
151 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
152 
153 /*
154  * Note that ELF_NREG should be 29, as there should also be room for
155  * the TRAPNO and ERR "registers", but Linux doesn't dump
156  * those.
157  *
158  * See linux kernel: arch/x86/include/asm/elf.h
159  */
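/*
 * The slot order below follows the kernel's x86-64 struct user_regs_struct.
 * Slot 15 (orig_rax) and slots 21/22 (fs_base/gs_base) have no direct
 * equivalent here and are only approximated with RAX and the FS/GS
 * selectors.
 */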
160 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
161 {
162     (*regs)[0] = env->regs[15];
163     (*regs)[1] = env->regs[14];
164     (*regs)[2] = env->regs[13];
165     (*regs)[3] = env->regs[12];
166     (*regs)[4] = env->regs[R_EBP];
167     (*regs)[5] = env->regs[R_EBX];
168     (*regs)[6] = env->regs[11];
169     (*regs)[7] = env->regs[10];
170     (*regs)[8] = env->regs[9];
171     (*regs)[9] = env->regs[8];
172     (*regs)[10] = env->regs[R_EAX];
173     (*regs)[11] = env->regs[R_ECX];
174     (*regs)[12] = env->regs[R_EDX];
175     (*regs)[13] = env->regs[R_ESI];
176     (*regs)[14] = env->regs[R_EDI];
177     (*regs)[15] = env->regs[R_EAX]; /* XXX */
178     (*regs)[16] = env->eip;
179     (*regs)[17] = env->segs[R_CS].selector & 0xffff;
180     (*regs)[18] = env->eflags;
181     (*regs)[19] = env->regs[R_ESP];
182     (*regs)[20] = env->segs[R_SS].selector & 0xffff;
183     (*regs)[21] = env->segs[R_FS].selector & 0xffff;
184     (*regs)[22] = env->segs[R_GS].selector & 0xffff;
185     (*regs)[23] = env->segs[R_DS].selector & 0xffff;
186     (*regs)[24] = env->segs[R_ES].selector & 0xffff;
187     (*regs)[25] = env->segs[R_FS].selector & 0xffff;
188     (*regs)[26] = env->segs[R_GS].selector & 0xffff;
189 }
190 
191 #else
192 
193 #define ELF_START_MMAP 0x80000000
194 
195 /*
196  * This is used to ensure we don't load something for the wrong architecture.
197  */
198 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
199 
200 /*
201  * These are used to set parameters in the core dumps.
202  */
203 #define ELF_CLASS       ELFCLASS32
204 #define ELF_ARCH        EM_386
205 
206 static inline void init_thread(struct target_pt_regs *regs,
207                                struct image_info *infop)
208 {
209     regs->esp = infop->start_stack;
210     regs->eip = infop->entry;
211 
212     /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
213        starts %edx contains a pointer to a function which might be
214        registered using `atexit'.  This provides a means for the
215        dynamic linker to call DT_FINI functions for shared libraries
216        that have been loaded before the code runs.
217 
218        A value of 0 tells us that we have no such handler.  */
219     regs->edx = 0;
220 }
221 
222 #define ELF_NREG    17
223 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
224 
225 /*
226  * Note that ELF_NREG should be 19, as there should also be room for
227  * the TRAPNO and ERR "registers", but Linux doesn't dump
228  * those.
229  *
230  * See linux kernel: arch/x86/include/asm/elf.h
231  */
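/*
 * The slot order below follows the kernel's i386 struct user_regs_struct
 * (ebx, ecx, edx, esi, edi, ebp, eax, ds, es, fs, gs, orig_eax, eip, cs,
 * eflags, esp, ss); slot 11 (orig_eax) is only approximated with EAX.
 */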
232 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
233 {
234     (*regs)[0] = env->regs[R_EBX];
235     (*regs)[1] = env->regs[R_ECX];
236     (*regs)[2] = env->regs[R_EDX];
237     (*regs)[3] = env->regs[R_ESI];
238     (*regs)[4] = env->regs[R_EDI];
239     (*regs)[5] = env->regs[R_EBP];
240     (*regs)[6] = env->regs[R_EAX];
241     (*regs)[7] = env->segs[R_DS].selector & 0xffff;
242     (*regs)[8] = env->segs[R_ES].selector & 0xffff;
243     (*regs)[9] = env->segs[R_FS].selector & 0xffff;
244     (*regs)[10] = env->segs[R_GS].selector & 0xffff;
245     (*regs)[11] = env->regs[R_EAX]; /* XXX */
246     (*regs)[12] = env->eip;
247     (*regs)[13] = env->segs[R_CS].selector & 0xffff;
248     (*regs)[14] = env->eflags;
249     (*regs)[15] = env->regs[R_ESP];
250     (*regs)[16] = env->segs[R_SS].selector & 0xffff;
251 }
252 #endif
253 
254 #define USE_ELF_CORE_DUMP
255 #define ELF_EXEC_PAGESIZE       4096
256 
257 #endif
258 
259 #ifdef TARGET_ARM
260 
261 #define ELF_START_MMAP 0x80000000
262 
263 #define elf_check_arch(x) ( (x) == EM_ARM )
264 
265 #define ELF_CLASS       ELFCLASS32
266 #define ELF_ARCH        EM_ARM
267 
268 static inline void init_thread(struct target_pt_regs *regs,
269                                struct image_info *infop)
270 {
271     abi_long stack = infop->start_stack;
272     memset(regs, 0, sizeof(*regs));
273     regs->ARM_cpsr = 0x10;
274     if (infop->entry & 1)
275         regs->ARM_cpsr |= CPSR_T;
276     regs->ARM_pc = infop->entry & 0xfffffffe;
277     regs->ARM_sp = infop->start_stack;
278     /* FIXME - what to do for failure of get_user()? */
279     get_user_ual(regs->ARM_r2, stack + 8); /* envp */
280     get_user_ual(regs->ARM_r1, stack + 4); /* envp */
281     /* XXX: it seems that r0 is zeroed afterwards anyway! */
282     regs->ARM_r0 = 0;
283     /* For uClinux PIC binaries.  */
284     /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
285     regs->ARM_r10 = infop->start_data;
286 }
287 
288 #define ELF_NREG    18
289 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
290 
291 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
292 {
293     (*regs)[0] = tswapl(env->regs[0]);
294     (*regs)[1] = tswapl(env->regs[1]);
295     (*regs)[2] = tswapl(env->regs[2]);
296     (*regs)[3] = tswapl(env->regs[3]);
297     (*regs)[4] = tswapl(env->regs[4]);
298     (*regs)[5] = tswapl(env->regs[5]);
299     (*regs)[6] = tswapl(env->regs[6]);
300     (*regs)[7] = tswapl(env->regs[7]);
301     (*regs)[8] = tswapl(env->regs[8]);
302     (*regs)[9] = tswapl(env->regs[9]);
303     (*regs)[10] = tswapl(env->regs[10]);
304     (*regs)[11] = tswapl(env->regs[11]);
305     (*regs)[12] = tswapl(env->regs[12]);
306     (*regs)[13] = tswapl(env->regs[13]);
307     (*regs)[14] = tswapl(env->regs[14]);
308     (*regs)[15] = tswapl(env->regs[15]);
309 
310     (*regs)[16] = tswapl(cpsr_read((CPUARMState *)env));
311     (*regs)[17] = tswapl(env->regs[0]); /* XXX */
312 }
313 
314 #define USE_ELF_CORE_DUMP
315 #define ELF_EXEC_PAGESIZE       4096
316 
317 enum
318 {
319     ARM_HWCAP_ARM_SWP       = 1 << 0,
320     ARM_HWCAP_ARM_HALF      = 1 << 1,
321     ARM_HWCAP_ARM_THUMB     = 1 << 2,
322     ARM_HWCAP_ARM_26BIT     = 1 << 3,
323     ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
324     ARM_HWCAP_ARM_FPA       = 1 << 5,
325     ARM_HWCAP_ARM_VFP       = 1 << 6,
326     ARM_HWCAP_ARM_EDSP      = 1 << 7,
327     ARM_HWCAP_ARM_JAVA      = 1 << 8,
328     ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
329     ARM_HWCAP_ARM_THUMBEE   = 1 << 10,
330     ARM_HWCAP_ARM_NEON      = 1 << 11,
331     ARM_HWCAP_ARM_VFPv3     = 1 << 12,
332     ARM_HWCAP_ARM_VFPv3D16  = 1 << 13,
333 };
334 
335 #define TARGET_HAS_GUEST_VALIDATE_BASE
336 /* We want the opportunity to check the suggested base */
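/*
 * The 32-bit ARM kernel exposes a commpage of user helpers at the top of
 * the address space (0xffff0fxx).  A candidate guest_base is acceptable
 * only if we can map a host page there and populate the kernel-helper
 * version word at 0xffff0ffc, which is what this check does.
 */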
337 bool guest_validate_base(unsigned long guest_base)
338 {
339     unsigned long real_start, test_page_addr;
340 
341     /* We need to check that we can force a fault on access to the
342      * commpage at 0xffff0fxx
343      */
344     test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask);
345     /* Note it needs to be writeable to let us initialise it */
346     real_start = (unsigned long)
347                  mmap((void *)test_page_addr, qemu_host_page_size,
348                      PROT_READ | PROT_WRITE,
349                      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
350 
351     /* If we can't map it then try another address */
352     if (real_start == -1ul) {
353         return 0;
354     }
355 
356     if (real_start != test_page_addr) {
357         /* OS didn't put the page where we asked - unmap and reject */
358         munmap((void *)real_start, qemu_host_page_size);
359         return 0;
360     }
361 
362     /* Leave the page mapped
363      * Populate it (mmap should have left it all 0'd)
364      */
365 
366     /* Kernel helper versions */
367     __put_user(5, (uint32_t *)g2h(0xffff0ffcul));
368 
369     /* Now it's populated make it RO */
370     if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) {
371         perror("Protecting guest commpage");
372         exit(-1);
373     }
374 
375     return 1; /* All good */
376 }
377 
378 
379 #define ELF_HWCAP get_elf_hwcap()
380 
381 static uint32_t get_elf_hwcap(void)
382 {
383     CPUARMState *e = thread_env;
384     uint32_t hwcaps = 0;
385 
386     hwcaps |= ARM_HWCAP_ARM_SWP;
387     hwcaps |= ARM_HWCAP_ARM_HALF;
388     hwcaps |= ARM_HWCAP_ARM_THUMB;
389     hwcaps |= ARM_HWCAP_ARM_FAST_MULT;
390     hwcaps |= ARM_HWCAP_ARM_FPA;
391 
392     /* probe for the extra features */
393 #define GET_FEATURE(feat, hwcap) \
394     do {if (arm_feature(e, feat)) { hwcaps |= hwcap; } } while (0)
395     GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
396     GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
397     GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
398     GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
399     GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
400     GET_FEATURE(ARM_FEATURE_VFP_FP16, ARM_HWCAP_ARM_VFPv3D16);
401 #undef GET_FEATURE
402 
403     return hwcaps;
404 }
405 
406 #endif
407 
408 #ifdef TARGET_UNICORE32
409 
410 #define ELF_START_MMAP          0x80000000
411 
412 #define elf_check_arch(x)       ((x) == EM_UNICORE32)
413 
414 #define ELF_CLASS               ELFCLASS32
415 #define ELF_DATA                ELFDATA2LSB
416 #define ELF_ARCH                EM_UNICORE32
417 
418 static inline void init_thread(struct target_pt_regs *regs,
419         struct image_info *infop)
420 {
421     abi_long stack = infop->start_stack;
422     memset(regs, 0, sizeof(*regs));
423     regs->UC32_REG_asr = 0x10;
424     regs->UC32_REG_pc = infop->entry & 0xfffffffe;
425     regs->UC32_REG_sp = infop->start_stack;
426     /* FIXME - what to do for failure of get_user()? */
427     get_user_ual(regs->UC32_REG_02, stack + 8); /* envp */
428     get_user_ual(regs->UC32_REG_01, stack + 4); /* envp */
429     /* XXX: it seems that r0 is zeroed afterwards anyway! */
430     regs->UC32_REG_00 = 0;
431 }
432 
433 #define ELF_NREG    34
434 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
435 
436 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUUniCore32State *env)
437 {
438     (*regs)[0] = env->regs[0];
439     (*regs)[1] = env->regs[1];
440     (*regs)[2] = env->regs[2];
441     (*regs)[3] = env->regs[3];
442     (*regs)[4] = env->regs[4];
443     (*regs)[5] = env->regs[5];
444     (*regs)[6] = env->regs[6];
445     (*regs)[7] = env->regs[7];
446     (*regs)[8] = env->regs[8];
447     (*regs)[9] = env->regs[9];
448     (*regs)[10] = env->regs[10];
449     (*regs)[11] = env->regs[11];
450     (*regs)[12] = env->regs[12];
451     (*regs)[13] = env->regs[13];
452     (*regs)[14] = env->regs[14];
453     (*regs)[15] = env->regs[15];
454     (*regs)[16] = env->regs[16];
455     (*regs)[17] = env->regs[17];
456     (*regs)[18] = env->regs[18];
457     (*regs)[19] = env->regs[19];
458     (*regs)[20] = env->regs[20];
459     (*regs)[21] = env->regs[21];
460     (*regs)[22] = env->regs[22];
461     (*regs)[23] = env->regs[23];
462     (*regs)[24] = env->regs[24];
463     (*regs)[25] = env->regs[25];
464     (*regs)[26] = env->regs[26];
465     (*regs)[27] = env->regs[27];
466     (*regs)[28] = env->regs[28];
467     (*regs)[29] = env->regs[29];
468     (*regs)[30] = env->regs[30];
469     (*regs)[31] = env->regs[31];
470 
471     (*regs)[32] = cpu_asr_read((CPUUniCore32State *)env);
472     (*regs)[33] = env->regs[0]; /* XXX */
473 }
474 
475 #define USE_ELF_CORE_DUMP
476 #define ELF_EXEC_PAGESIZE               4096
477 
478 #define ELF_HWCAP                       (UC32_HWCAP_CMOV | UC32_HWCAP_UCF64)
479 
480 #endif
481 
482 #ifdef TARGET_SPARC
483 #ifdef TARGET_SPARC64
484 
485 #define ELF_START_MMAP 0x80000000
486 #define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
487                     | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
488 #ifndef TARGET_ABI32
489 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
490 #else
491 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
492 #endif
493 
494 #define ELF_CLASS   ELFCLASS64
495 #define ELF_ARCH    EM_SPARCV9
496 
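/* The 64-bit SPARC V9 ABI biases the stack pointer by 2047 bytes; init_thread
   below also reserves a 16-register window save area below the initial %sp. */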
497 #define STACK_BIAS              2047
498 
499 static inline void init_thread(struct target_pt_regs *regs,
500                                struct image_info *infop)
501 {
502 #ifndef TARGET_ABI32
503     regs->tstate = 0;
504 #endif
505     regs->pc = infop->entry;
506     regs->npc = regs->pc + 4;
507     regs->y = 0;
508 #ifdef TARGET_ABI32
509     regs->u_regs[14] = infop->start_stack - 16 * 4;
510 #else
511     if (personality(infop->personality) == PER_LINUX32)
512         regs->u_regs[14] = infop->start_stack - 16 * 4;
513     else
514         regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
515 #endif
516 }
517 
518 #else
519 #define ELF_START_MMAP 0x80000000
520 #define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
521                     | HWCAP_SPARC_MULDIV)
522 #define elf_check_arch(x) ( (x) == EM_SPARC )
523 
524 #define ELF_CLASS   ELFCLASS32
525 #define ELF_ARCH    EM_SPARC
526 
527 static inline void init_thread(struct target_pt_regs *regs,
528                                struct image_info *infop)
529 {
530     regs->psr = 0;
531     regs->pc = infop->entry;
532     regs->npc = regs->pc + 4;
533     regs->y = 0;
534     regs->u_regs[14] = infop->start_stack - 16 * 4;
535 }
536 
537 #endif
538 #endif
539 
540 #ifdef TARGET_PPC
541 
542 #define ELF_START_MMAP 0x80000000
543 
544 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
545 
546 #define elf_check_arch(x) ( (x) == EM_PPC64 )
547 
548 #define ELF_CLASS       ELFCLASS64
549 
550 #else
551 
552 #define elf_check_arch(x) ( (x) == EM_PPC )
553 
554 #define ELF_CLASS       ELFCLASS32
555 
556 #endif
557 
558 #define ELF_ARCH        EM_PPC
559 
560 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
561    See arch/powerpc/include/asm/cputable.h.  */
562 enum {
563     QEMU_PPC_FEATURE_32 = 0x80000000,
564     QEMU_PPC_FEATURE_64 = 0x40000000,
565     QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
566     QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
567     QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
568     QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
569     QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
570     QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
571     QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
572     QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
573     QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
574     QEMU_PPC_FEATURE_NO_TB = 0x00100000,
575     QEMU_PPC_FEATURE_POWER4 = 0x00080000,
576     QEMU_PPC_FEATURE_POWER5 = 0x00040000,
577     QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
578     QEMU_PPC_FEATURE_CELL = 0x00010000,
579     QEMU_PPC_FEATURE_BOOKE = 0x00008000,
580     QEMU_PPC_FEATURE_SMT = 0x00004000,
581     QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
582     QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
583     QEMU_PPC_FEATURE_PA6T = 0x00000800,
584     QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
585     QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
586     QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
587     QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
588     QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
589 
590     QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
591     QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
592 };
593 
594 #define ELF_HWCAP get_elf_hwcap()
595 
596 static uint32_t get_elf_hwcap(void)
597 {
598     CPUPPCState *e = thread_env;
599     uint32_t features = 0;
600 
601     /* We don't have to be terribly complete here; the high points are
602        Altivec/FP/SPE support.  Anything else is just a bonus.  */
603 #define GET_FEATURE(flag, feature)                                      \
604     do { if (e->insns_flags & flag) { features |= feature; } } while (0)
605     GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
606     GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
607     GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
608     GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
609     GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
610     GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
611     GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
612     GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
613 #undef GET_FEATURE
614 
615     return features;
616 }
617 
618 /*
619  * The requirements here are:
620  * - keep the final alignment of sp (sp & 0xf)
621  * - make sure the 32-bit value at the first 16 byte aligned position of
622  *   AUXV is greater than 16 for glibc compatibility.
623  *   AT_IGNOREPPC is used for that.
624  * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
625  *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
626  */
627 #define DLINFO_ARCH_ITEMS       5
628 #define ARCH_DLINFO                                     \
629     do {                                                \
630         NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
631         NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
632         NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
633         /*                                              \
634          * Now handle glibc compatibility.              \
635          */                                             \
636         NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
637         NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
638     } while (0)
639 
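/*
 * With the 64-bit PowerPC function-descriptor ABI, e_entry points at a
 * descriptor: the first doubleword is the real entry address and the
 * second is the TOC pointer that must be preloaded into r2.
 */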
640 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
641 {
642     _regs->gpr[1] = infop->start_stack;
643 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
644     _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_bias;
645     infop->entry = ldq_raw(infop->entry) + infop->load_bias;
646 #endif
647     _regs->nip = infop->entry;
648 }
649 
650 /* See linux kernel: arch/powerpc/include/asm/elf.h.  */
651 #define ELF_NREG 48
652 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
653 
654 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
655 {
656     int i;
657     target_ulong ccr = 0;
658 
659     for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
660         (*regs)[i] = tswapl(env->gpr[i]);
661     }
662 
663     (*regs)[32] = tswapl(env->nip);
664     (*regs)[33] = tswapl(env->msr);
665     (*regs)[35] = tswapl(env->ctr);
666     (*regs)[36] = tswapl(env->lr);
667     (*regs)[37] = tswapl(env->xer);
668 
669     for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
670         ccr |= env->crf[i] << (32 - ((i + 1) * 4));
671     }
672     (*regs)[38] = tswapl(ccr);
673 }
674 
675 #define USE_ELF_CORE_DUMP
676 #define ELF_EXEC_PAGESIZE       4096
677 
678 #endif
679 
680 #ifdef TARGET_MIPS
681 
682 #define ELF_START_MMAP 0x80000000
683 
684 #define elf_check_arch(x) ( (x) == EM_MIPS )
685 
686 #ifdef TARGET_MIPS64
687 #define ELF_CLASS   ELFCLASS64
688 #else
689 #define ELF_CLASS   ELFCLASS32
690 #endif
691 #define ELF_ARCH    EM_MIPS
692 
693 static inline void init_thread(struct target_pt_regs *regs,
694                                struct image_info *infop)
695 {
696     regs->cp0_status = 2 << CP0St_KSU;
697     regs->cp0_epc = infop->entry;
698     regs->regs[29] = infop->start_stack;
699 }
700 
701 /* See linux kernel: arch/mips/include/asm/elf.h.  */
702 #define ELF_NREG 45
703 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
704 
705 /* See linux kernel: arch/mips/include/asm/reg.h.  */
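/* The 32-bit dump layout has six leading slots before r0 (simply zeroed
   below), hence TARGET_EF_R0 = 6 there; on MIPS64 the registers start at 0. */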
706 enum {
707 #ifdef TARGET_MIPS64
708     TARGET_EF_R0 = 0,
709 #else
710     TARGET_EF_R0 = 6,
711 #endif
712     TARGET_EF_R26 = TARGET_EF_R0 + 26,
713     TARGET_EF_R27 = TARGET_EF_R0 + 27,
714     TARGET_EF_LO = TARGET_EF_R0 + 32,
715     TARGET_EF_HI = TARGET_EF_R0 + 33,
716     TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
717     TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
718     TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
719     TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
720 };
721 
722 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
723 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
724 {
725     int i;
726 
727     for (i = 0; i < TARGET_EF_R0; i++) {
728         (*regs)[i] = 0;
729     }
730     (*regs)[TARGET_EF_R0] = 0;
731 
732     for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
733         (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
734     }
735 
736     (*regs)[TARGET_EF_R26] = 0;
737     (*regs)[TARGET_EF_R27] = 0;
738     (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
739     (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
740     (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
741     (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
742     (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
743     (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
744 }
745 
746 #define USE_ELF_CORE_DUMP
747 #define ELF_EXEC_PAGESIZE        4096
748 
749 #endif /* TARGET_MIPS */
750 
751 #ifdef TARGET_MICROBLAZE
752 
753 #define ELF_START_MMAP 0x80000000
754 
755 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
756 
757 #define ELF_CLASS   ELFCLASS32
758 #define ELF_ARCH    EM_MICROBLAZE
759 
760 static inline void init_thread(struct target_pt_regs *regs,
761                                struct image_info *infop)
762 {
763     regs->pc = infop->entry;
764     regs->r1 = infop->start_stack;
765 
766 }
767 
768 #define ELF_EXEC_PAGESIZE        4096
769 
770 #define USE_ELF_CORE_DUMP
771 #define ELF_NREG 38
772 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
773 
774 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
775 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
776 {
777     int i, pos = 0;
778 
779     for (i = 0; i < 32; i++) {
780         (*regs)[pos++] = tswapl(env->regs[i]);
781     }
782 
783     for (i = 0; i < 6; i++) {
784         (*regs)[pos++] = tswapl(env->sregs[i]);
785     }
786 }
787 
788 #endif /* TARGET_MICROBLAZE */
789 
790 #ifdef TARGET_OPENRISC
791 
792 #define ELF_START_MMAP 0x08000000
793 
794 #define elf_check_arch(x) ((x) == EM_OPENRISC)
795 
796 #define ELF_ARCH EM_OPENRISC
797 #define ELF_CLASS ELFCLASS32
798 #define ELF_DATA  ELFDATA2MSB
799 
800 static inline void init_thread(struct target_pt_regs *regs,
801                                struct image_info *infop)
802 {
803     regs->pc = infop->entry;
804     regs->gpr[1] = infop->start_stack;
805 }
806 
807 #define USE_ELF_CORE_DUMP
808 #define ELF_EXEC_PAGESIZE 8192
809 
810 /* See linux kernel arch/openrisc/include/asm/elf.h.  */
811 #define ELF_NREG 34 /* gprs and pc, sr */
812 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
813 
814 static void elf_core_copy_regs(target_elf_gregset_t *regs,
815                                const CPUOpenRISCState *env)
816 {
817     int i;
818 
819     for (i = 0; i < 32; i++) {
820         (*regs)[i] = tswapl(env->gpr[i]);
821     }
822 
823     (*regs)[32] = tswapl(env->pc);
824     (*regs)[33] = tswapl(env->sr);
825 }
826 #define ELF_HWCAP 0
827 #define ELF_PLATFORM NULL
828 
829 #endif /* TARGET_OPENRISC */
830 
831 #ifdef TARGET_SH4
832 
833 #define ELF_START_MMAP 0x80000000
834 
835 #define elf_check_arch(x) ( (x) == EM_SH )
836 
837 #define ELF_CLASS ELFCLASS32
838 #define ELF_ARCH  EM_SH
839 
840 static inline void init_thread(struct target_pt_regs *regs,
841                                struct image_info *infop)
842 {
843     /* Check other registers XXXXX */
844     regs->pc = infop->entry;
845     regs->regs[15] = infop->start_stack;
846 }
847 
848 /* See linux kernel: arch/sh/include/asm/elf.h.  */
849 #define ELF_NREG 23
850 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
851 
852 /* See linux kernel: arch/sh/include/asm/ptrace.h.  */
853 enum {
854     TARGET_REG_PC = 16,
855     TARGET_REG_PR = 17,
856     TARGET_REG_SR = 18,
857     TARGET_REG_GBR = 19,
858     TARGET_REG_MACH = 20,
859     TARGET_REG_MACL = 21,
860     TARGET_REG_SYSCALL = 22
861 };
862 
863 static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
864                                       const CPUSH4State *env)
865 {
866     int i;
867 
868     for (i = 0; i < 16; i++) {
869         (*regs)[i] = tswapl(env->gregs[i]);
870     }
871 
872     (*regs)[TARGET_REG_PC] = tswapl(env->pc);
873     (*regs)[TARGET_REG_PR] = tswapl(env->pr);
874     (*regs)[TARGET_REG_SR] = tswapl(env->sr);
875     (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
876     (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
877     (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
878     (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
879 }
880 
881 #define USE_ELF_CORE_DUMP
882 #define ELF_EXEC_PAGESIZE        4096
883 
884 #endif
885 
886 #ifdef TARGET_CRIS
887 
888 #define ELF_START_MMAP 0x80000000
889 
890 #define elf_check_arch(x) ( (x) == EM_CRIS )
891 
892 #define ELF_CLASS ELFCLASS32
893 #define ELF_ARCH  EM_CRIS
894 
895 static inline void init_thread(struct target_pt_regs *regs,
896                                struct image_info *infop)
897 {
898     regs->erp = infop->entry;
899 }
900 
901 #define ELF_EXEC_PAGESIZE        8192
902 
903 #endif
904 
905 #ifdef TARGET_M68K
906 
907 #define ELF_START_MMAP 0x80000000
908 
909 #define elf_check_arch(x) ( (x) == EM_68K )
910 
911 #define ELF_CLASS       ELFCLASS32
912 #define ELF_ARCH        EM_68K
913 
914 /* ??? Does this need to do anything?
915    #define ELF_PLAT_INIT(_r) */
916 
917 static inline void init_thread(struct target_pt_regs *regs,
918                                struct image_info *infop)
919 {
920     regs->usp = infop->start_stack;
921     regs->sr = 0;
922     regs->pc = infop->entry;
923 }
924 
925 /* See linux kernel: arch/m68k/include/asm/elf.h.  */
926 #define ELF_NREG 20
927 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
928 
929 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
930 {
931     (*regs)[0] = tswapl(env->dregs[1]);
932     (*regs)[1] = tswapl(env->dregs[2]);
933     (*regs)[2] = tswapl(env->dregs[3]);
934     (*regs)[3] = tswapl(env->dregs[4]);
935     (*regs)[4] = tswapl(env->dregs[5]);
936     (*regs)[5] = tswapl(env->dregs[6]);
937     (*regs)[6] = tswapl(env->dregs[7]);
938     (*regs)[7] = tswapl(env->aregs[0]);
939     (*regs)[8] = tswapl(env->aregs[1]);
940     (*regs)[9] = tswapl(env->aregs[2]);
941     (*regs)[10] = tswapl(env->aregs[3]);
942     (*regs)[11] = tswapl(env->aregs[4]);
943     (*regs)[12] = tswapl(env->aregs[5]);
944     (*regs)[13] = tswapl(env->aregs[6]);
945     (*regs)[14] = tswapl(env->dregs[0]);
946     (*regs)[15] = tswapl(env->aregs[7]);
947     (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
948     (*regs)[17] = tswapl(env->sr);
949     (*regs)[18] = tswapl(env->pc);
950     (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
951 }
952 
953 #define USE_ELF_CORE_DUMP
954 #define ELF_EXEC_PAGESIZE       8192
955 
956 #endif
957 
958 #ifdef TARGET_ALPHA
959 
960 #define ELF_START_MMAP (0x30000000000ULL)
961 
962 #define elf_check_arch(x) ( (x) == ELF_ARCH )
963 
964 #define ELF_CLASS      ELFCLASS64
965 #define ELF_ARCH       EM_ALPHA
966 
967 static inline void init_thread(struct target_pt_regs *regs,
968                                struct image_info *infop)
969 {
970     regs->pc = infop->entry;
971     regs->ps = 8;
972     regs->usp = infop->start_stack;
973 }
974 
975 #define ELF_EXEC_PAGESIZE        8192
976 
977 #endif /* TARGET_ALPHA */
978 
979 #ifdef TARGET_S390X
980 
981 #define ELF_START_MMAP (0x20000000000ULL)
982 
983 #define elf_check_arch(x) ( (x) == ELF_ARCH )
984 
985 #define ELF_CLASS	ELFCLASS64
986 #define ELF_DATA	ELFDATA2MSB
987 #define ELF_ARCH	EM_S390
988 
989 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
990 {
991     regs->psw.addr = infop->entry;
992     regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
993     regs->gprs[15] = infop->start_stack;
994 }
995 
996 #endif /* TARGET_S390X */
997 
998 #ifndef ELF_PLATFORM
999 #define ELF_PLATFORM (NULL)
1000 #endif
1001 
1002 #ifndef ELF_HWCAP
1003 #define ELF_HWCAP 0
1004 #endif
1005 
1006 #ifdef TARGET_ABI32
1007 #undef ELF_CLASS
1008 #define ELF_CLASS ELFCLASS32
1009 #undef bswaptls
1010 #define bswaptls(ptr) bswap32s(ptr)
1011 #endif
1012 
1013 #include "elf.h"
1014 
1015 struct exec
1016 {
1017     unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
1018     unsigned int a_text;   /* length of text, in bytes */
1019     unsigned int a_data;   /* length of data, in bytes */
1020     unsigned int a_bss;    /* length of uninitialized data area, in bytes */
1021     unsigned int a_syms;   /* length of symbol table data in file, in bytes */
1022     unsigned int a_entry;  /* start address */
1023     unsigned int a_trsize; /* length of relocation info for text, in bytes */
1024     unsigned int a_drsize; /* length of relocation info for data, in bytes */
1025 };
1026 
1027 
1028 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
1029 #define OMAGIC 0407
1030 #define NMAGIC 0410
1031 #define ZMAGIC 0413
1032 #define QMAGIC 0314
1033 
1034 /* Necessary parameters */
1035 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
1036 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
1037 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
1038 
1039 #define DLINFO_ITEMS 13
1040 
1041 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
1042 {
1043     memcpy(to, from, n);
1044 }
1045 
1046 #ifdef BSWAP_NEEDED
1047 static void bswap_ehdr(struct elfhdr *ehdr)
1048 {
1049     bswap16s(&ehdr->e_type);            /* Object file type */
1050     bswap16s(&ehdr->e_machine);         /* Architecture */
1051     bswap32s(&ehdr->e_version);         /* Object file version */
1052     bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
1053     bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
1054     bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
1055     bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
1056     bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
1057     bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
1058     bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
1059     bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
1060     bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
1061     bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
1062 }
1063 
1064 static void bswap_phdr(struct elf_phdr *phdr, int phnum)
1065 {
1066     int i;
1067     for (i = 0; i < phnum; ++i, ++phdr) {
1068         bswap32s(&phdr->p_type);        /* Segment type */
1069         bswap32s(&phdr->p_flags);       /* Segment flags */
1070         bswaptls(&phdr->p_offset);      /* Segment file offset */
1071         bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
1072         bswaptls(&phdr->p_paddr);       /* Segment physical address */
1073         bswaptls(&phdr->p_filesz);      /* Segment size in file */
1074         bswaptls(&phdr->p_memsz);       /* Segment size in memory */
1075         bswaptls(&phdr->p_align);       /* Segment alignment */
1076     }
1077 }
1078 
1079 static void bswap_shdr(struct elf_shdr *shdr, int shnum)
1080 {
1081     int i;
1082     for (i = 0; i < shnum; ++i, ++shdr) {
1083         bswap32s(&shdr->sh_name);
1084         bswap32s(&shdr->sh_type);
1085         bswaptls(&shdr->sh_flags);
1086         bswaptls(&shdr->sh_addr);
1087         bswaptls(&shdr->sh_offset);
1088         bswaptls(&shdr->sh_size);
1089         bswap32s(&shdr->sh_link);
1090         bswap32s(&shdr->sh_info);
1091         bswaptls(&shdr->sh_addralign);
1092         bswaptls(&shdr->sh_entsize);
1093     }
1094 }
1095 
1096 static void bswap_sym(struct elf_sym *sym)
1097 {
1098     bswap32s(&sym->st_name);
1099     bswaptls(&sym->st_value);
1100     bswaptls(&sym->st_size);
1101     bswap16s(&sym->st_shndx);
1102 }
1103 #else
1104 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
1105 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
1106 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
1107 static inline void bswap_sym(struct elf_sym *sym) { }
1108 #endif
1109 
1110 #ifdef USE_ELF_CORE_DUMP
1111 static int elf_core_dump(int, const CPUArchState *);
1112 #endif /* USE_ELF_CORE_DUMP */
1113 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
1114 
1115 /* Verify the portions of EHDR within E_IDENT for the target.
1116    This can be performed before bswapping the entire header.  */
1117 static bool elf_check_ident(struct elfhdr *ehdr)
1118 {
1119     return (ehdr->e_ident[EI_MAG0] == ELFMAG0
1120             && ehdr->e_ident[EI_MAG1] == ELFMAG1
1121             && ehdr->e_ident[EI_MAG2] == ELFMAG2
1122             && ehdr->e_ident[EI_MAG3] == ELFMAG3
1123             && ehdr->e_ident[EI_CLASS] == ELF_CLASS
1124             && ehdr->e_ident[EI_DATA] == ELF_DATA
1125             && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
1126 }
1127 
1128 /* Verify the portions of EHDR outside of E_IDENT for the target.
1129    This has to wait until after bswapping the header.  */
1130 static bool elf_check_ehdr(struct elfhdr *ehdr)
1131 {
1132     return (elf_check_arch(ehdr->e_machine)
1133             && ehdr->e_ehsize == sizeof(struct elfhdr)
1134             && ehdr->e_phentsize == sizeof(struct elf_phdr)
1135             && ehdr->e_shentsize == sizeof(struct elf_shdr)
1136             && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
1137 }
1138 
1139 /*
1140  * 'copy_elf_strings()' copies argument/environment strings from user
1141  * memory to free pages in kernel memory. These are in a format ready
1142  * to be put directly into the top of new user memory.
1143  *
1144  */
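/*
 * P is the current top-of-strings offset within the MAX_ARG_PAGES argument
 * area.  Strings are copied last-argument-first, growing downwards into
 * lazily allocated page buffers in PAGE[]; the new, lower offset is
 * returned, or 0 on failure.
 */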
1145 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
1146                                   abi_ulong p)
1147 {
1148     char *tmp, *tmp1, *pag = NULL;
1149     int len, offset = 0;
1150 
1151     if (!p) {
1152         return 0;       /* bullet-proofing */
1153     }
1154     while (argc-- > 0) {
1155         tmp = argv[argc];
1156         if (!tmp) {
1157             fprintf(stderr, "VFS: argc is wrong");
1158             exit(-1);
1159         }
1160         tmp1 = tmp;
1161         while (*tmp++);
1162         len = tmp - tmp1;
1163         if (p < len) {  /* this shouldn't happen - 128kB */
1164             return 0;
1165         }
1166         while (len) {
1167             --p; --tmp; --len;
1168             if (--offset < 0) {
1169                 offset = p % TARGET_PAGE_SIZE;
1170                 pag = (char *)page[p/TARGET_PAGE_SIZE];
1171                 if (!pag) {
1172                     pag = g_try_malloc0(TARGET_PAGE_SIZE);
1173                     page[p/TARGET_PAGE_SIZE] = pag;
1174                     if (!pag)
1175                         return 0;
1176                 }
1177             }
1178             if (len == 0 || offset == 0) {
1179                 *(pag + offset) = *tmp;
1180             }
1181             else {
1182                 int bytes_to_copy = (len > offset) ? offset : len;
1183                 tmp -= bytes_to_copy;
1184                 p -= bytes_to_copy;
1185                 offset -= bytes_to_copy;
1186                 len -= bytes_to_copy;
1187                 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
1188             }
1189         }
1190     }
1191     return p;
1192 }
1193 
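/*
 * Allocate the guest stack (plus a guard page at its low end), copy the
 * argument/environment pages built by copy_elf_strings() to its top, and
 * convert the string offset P into an absolute guest address.
 */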
1194 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
1195                                  struct image_info *info)
1196 {
1197     abi_ulong stack_base, size, error, guard;
1198     int i;
1199 
1200     /* Create enough stack to hold everything.  If we don't use
1201        it for args, we'll use it for something else.  */
1202     size = guest_stack_size;
1203     if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) {
1204         size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1205     }
1206     guard = TARGET_PAGE_SIZE;
1207     if (guard < qemu_real_host_page_size) {
1208         guard = qemu_real_host_page_size;
1209     }
1210 
1211     error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
1212                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1213     if (error == -1) {
1214         perror("mmap stack");
1215         exit(-1);
1216     }
1217 
1218     /* We reserve one extra page at the top of the stack as guard.  */
1219     target_mprotect(error, guard, PROT_NONE);
1220 
1221     info->stack_limit = error + guard;
1222     stack_base = info->stack_limit + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1223     p += stack_base;
1224 
1225     for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
1226         if (bprm->page[i]) {
1227             info->rss++;
1228             /* FIXME - check return value of memcpy_to_target() for failure */
1229             memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
1230             g_free(bprm->page[i]);
1231         }
1232         stack_base += TARGET_PAGE_SIZE;
1233     }
1234     return p;
1235 }
1236 
1237 /* Map and zero the bss.  We need to explicitly zero any fractional pages
1238    after the data section (i.e. bss).  */
1239 static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
1240 {
1241     uintptr_t host_start, host_map_start, host_end;
1242 
1243     last_bss = TARGET_PAGE_ALIGN(last_bss);
1244 
1245     /* ??? There is confusion between qemu_real_host_page_size and
1246        qemu_host_page_size here and elsewhere in target_mmap, which
1247        may lead to the end of the data section mapping from the file
1248        not being mapped.  At least there was an explicit test and
1249        comment for that here, suggesting that "the file size must
1250        be known".  The comment probably pre-dates the introduction
1251        of the fstat system call in target_mmap which does in fact
1252        find out the size.  What isn't clear is if the workaround
1253        here is still actually needed.  For now, continue with it,
1254        but merge it with the "normal" mmap that would allocate the bss.  */
1255 
1256     host_start = (uintptr_t) g2h(elf_bss);
1257     host_end = (uintptr_t) g2h(last_bss);
1258     host_map_start = (host_start + qemu_real_host_page_size - 1);
1259     host_map_start &= -qemu_real_host_page_size;
1260 
1261     if (host_map_start < host_end) {
1262         void *p = mmap((void *)host_map_start, host_end - host_map_start,
1263                        prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1264         if (p == MAP_FAILED) {
1265             perror("cannot mmap brk");
1266             exit(-1);
1267         }
1268 
1269         /* Since we didn't use target_mmap, make sure to record
1270            the validity of the pages with qemu.  */
1271         page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID);
1272     }
1273 
1274     if (host_start < host_map_start) {
1275         memset((void *)host_start, 0, host_map_start - host_start);
1276     }
1277 }
1278 
1279 #ifdef CONFIG_USE_FDPIC
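/*
 * Push an elf32_fdpic_loadmap onto the guest stack: the loadseg array is
 * pushed first (so that it ends up above the header), followed by the
 * version/nsegs header words.  The resulting SP is recorded as the
 * loadmap address.
 */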
1280 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
1281 {
1282     uint16_t n;
1283     struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
1284 
1285     /* elf32_fdpic_loadseg */
1286     n = info->nsegs;
1287     while (n--) {
1288         sp -= 12;
1289         put_user_u32(loadsegs[n].addr, sp+0);
1290         put_user_u32(loadsegs[n].p_vaddr, sp+4);
1291         put_user_u32(loadsegs[n].p_memsz, sp+8);
1292     }
1293 
1294     /* elf32_fdpic_loadmap */
1295     sp -= 4;
1296     put_user_u16(0, sp+0); /* version */
1297     put_user_u16(info->nsegs, sp+2); /* nsegs */
1298 
1299     info->personality = PER_LINUX_FDPIC;
1300     info->loadmap_addr = sp;
1301 
1302     return sp;
1303 }
1304 #endif
1305 
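/*
 * Finish building the initial guest stack below the argument/environment
 * strings: the optional platform string, 16 bytes for AT_RANDOM, the ELF
 * auxiliary vector, and finally envp/argv/argc via loader_build_argptr().
 * Returns the new stack pointer.
 */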
1306 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1307                                    struct elfhdr *exec,
1308                                    struct image_info *info,
1309                                    struct image_info *interp_info)
1310 {
1311     abi_ulong sp;
1312     abi_ulong sp_auxv;
1313     int size;
1314     int i;
1315     abi_ulong u_rand_bytes;
1316     uint8_t k_rand_bytes[16];
1317     abi_ulong u_platform;
1318     const char *k_platform;
1319     const int n = sizeof(elf_addr_t);
1320 
1321     sp = p;
1322 
1323 #ifdef CONFIG_USE_FDPIC
1324     /* Needs to be before we load the env/argc/... */
1325     if (elf_is_fdpic(exec)) {
1326         /* Need 4 byte alignment for these structs */
1327         sp &= ~3;
1328         sp = loader_build_fdpic_loadmap(info, sp);
1329         info->other_info = interp_info;
1330         if (interp_info) {
1331             interp_info->other_info = info;
1332             sp = loader_build_fdpic_loadmap(interp_info, sp);
1333         }
1334     }
1335 #endif
1336 
1337     u_platform = 0;
1338     k_platform = ELF_PLATFORM;
1339     if (k_platform) {
1340         size_t len = strlen(k_platform) + 1;
1341         sp -= (len + n - 1) & ~(n - 1);
1342         u_platform = sp;
1343         /* FIXME - check return value of memcpy_to_target() for failure */
1344         memcpy_to_target(sp, k_platform, len);
1345     }
1346 
1347     /*
1348      * Generate 16 random bytes for userspace PRNG seeding (not
1349      * cryptographically secure, but that is not the aim of QEMU).
1350      */
1351     srand((unsigned int) time(NULL));
1352     for (i = 0; i < 16; i++) {
1353         k_rand_bytes[i] = rand();
1354     }
1355     sp -= 16;
1356     u_rand_bytes = sp;
1357     /* FIXME - check return value of memcpy_to_target() for failure */
1358     memcpy_to_target(sp, k_rand_bytes, 16);
1359 
1360     /*
1361      * Force 16 byte _final_ alignment here for generality.
1362      */
1363     sp = sp &~ (abi_ulong)15;
1364     size = (DLINFO_ITEMS + 1) * 2;
1365     if (k_platform)
1366         size += 2;
1367 #ifdef DLINFO_ARCH_ITEMS
1368     size += DLINFO_ARCH_ITEMS * 2;
1369 #endif
1370     size += envc + argc + 2;
1371     size += 1;  /* argc itself */
1372     size *= n;
1373     if (size & 15)
1374         sp -= 16 - (size & 15);
1375 
1376     /* This is correct because Linux defines
1377      * elf_addr_t as Elf32_Off / Elf64_Off
1378      */
1379 #define NEW_AUX_ENT(id, val) do {               \
1380         sp -= n; put_user_ual(val, sp);         \
1381         sp -= n; put_user_ual(id, sp);          \
1382     } while(0)
1383 
1384     sp_auxv = sp;
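    /* Entries are pushed downwards, so the AT_NULL pushed first ends up as
       the terminating entry at the highest address of the auxv.  */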
1385     NEW_AUX_ENT (AT_NULL, 0);
1386 
1387     /* There must be exactly DLINFO_ITEMS entries here.  */
1388     NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
1389     NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1390     NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1391     NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1392     NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
1393     NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1394     NEW_AUX_ENT(AT_ENTRY, info->entry);
1395     NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1396     NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1397     NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1398     NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1399     NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1400     NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1401     NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
1402 
1403     if (k_platform)
1404         NEW_AUX_ENT(AT_PLATFORM, u_platform);
1405 #ifdef ARCH_DLINFO
1406     /*
1407      * ARCH_DLINFO must come last so platform specific code can enforce
1408      * special alignment requirements on the AUXV if necessary (eg. PPC).
1409      */
1410     ARCH_DLINFO;
1411 #endif
1412 #undef NEW_AUX_ENT
1413 
1414     info->saved_auxv = sp;
1415     info->auxv_len = sp_auxv - sp;
1416 
1417     sp = loader_build_argptr(envc, argc, sp, p, 0);
1418     return sp;
1419 }
1420 
1421 #ifndef TARGET_HAS_GUEST_VALIDATE_BASE
1422 /* If the guest doesn't have a validation function, just accept any base */
1423 bool guest_validate_base(unsigned long guest_base)
1424 {
1425     return 1;
1426 }
1427 #endif
1428 
1429 static void probe_guest_base(const char *image_name,
1430                              abi_ulong loaddr, abi_ulong hiaddr)
1431 {
1432     /* Probe for a suitable guest base address, if the user has not set
1433      * it explicitly, and set guest_base appropriately.
1434      * In case of error we will print a suitable message and exit.
1435      */
1436 #if defined(CONFIG_USE_GUEST_BASE)
1437     const char *errmsg;
1438     if (!have_guest_base && !reserved_va) {
1439         unsigned long host_start, real_start, host_size;
1440 
1441         /* Round addresses to page boundaries.  */
1442         loaddr &= qemu_host_page_mask;
1443         hiaddr = HOST_PAGE_ALIGN(hiaddr);
1444 
1445         if (loaddr < mmap_min_addr) {
1446             host_start = HOST_PAGE_ALIGN(mmap_min_addr);
1447         } else {
1448             host_start = loaddr;
1449             if (host_start != loaddr) {
1450                 errmsg = "Address overflow loading ELF binary";
1451                 goto exit_errmsg;
1452             }
1453         }
1454         host_size = hiaddr - loaddr;
1455         while (1) {
1456             /* Do not use mmap_find_vma here because that is limited to the
1457                guest address space.  We are going to make the
1458                guest address space fit whatever we're given.  */
1459             real_start = (unsigned long)
1460                 mmap((void *)host_start, host_size, PROT_NONE,
1461                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
1462             if (real_start == (unsigned long)-1) {
1463                 goto exit_perror;
1464             }
1465             guest_base = real_start - loaddr;
1466             if ((real_start == host_start) &&
1467                 guest_validate_base(guest_base)) {
1468                 break;
1469             }
1470             /* That address didn't work.  Unmap and try a different one.
1471                The address the host picked instead is typically right at
1472                the top of the host address space and leaves the guest with
1473                no usable address space.  Resort to a linear search.  We
1474                already compensated for mmap_min_addr, so this should not
1475                happen often.  Probably means we got unlucky and host
1476                address space randomization put a shared library somewhere
1477                inconvenient.  */
1478             munmap((void *)real_start, host_size);
1479             host_start += qemu_host_page_size;
1480             if (host_start == loaddr) {
1481                 /* Theoretically possible if host doesn't have any suitably
1482                    aligned areas.  Normally the first mmap will fail.  */
1483                 errmsg = "Unable to find space for application";
1484                 goto exit_errmsg;
1485             }
1486         }
1487         qemu_log("Relocating guest address space from 0x"
1488                  TARGET_ABI_FMT_lx " to 0x%lx\n",
1489                  loaddr, real_start);
1490     }
1491     return;
1492 
1493 exit_perror:
1494     errmsg = strerror(errno);
1495 exit_errmsg:
1496     fprintf(stderr, "%s: %s\n", image_name, errmsg);
1497     exit(-1);
1498 #endif
1499 }
1500 
1501 
1502 /* Load an ELF image into the address space.
1503 
1504    IMAGE_NAME is the filename of the image, to use in error messages.
1505    IMAGE_FD is the open file descriptor for the image.
1506 
1507    BPRM_BUF is a copy of the beginning of the file; this of course
1508    contains the elf file header at offset 0.  It is assumed that this
1509    buffer is sufficiently aligned to present no problems to the host
1510    in accessing data at aligned offsets within the buffer.
1511 
1512    On return: INFO values will be filled in, as necessary or available.  */
1513 
1514 static void load_elf_image(const char *image_name, int image_fd,
1515                            struct image_info *info, char **pinterp_name,
1516                            char bprm_buf[BPRM_BUF_SIZE])
1517 {
1518     struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
1519     struct elf_phdr *phdr;
1520     abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
1521     int i, retval;
1522     const char *errmsg;
1523 
1524     /* First of all, some simple consistency checks */
1525     errmsg = "Invalid ELF image for this architecture";
1526     if (!elf_check_ident(ehdr)) {
1527         goto exit_errmsg;
1528     }
1529     bswap_ehdr(ehdr);
1530     if (!elf_check_ehdr(ehdr)) {
1531         goto exit_errmsg;
1532     }
1533 
1534     i = ehdr->e_phnum * sizeof(struct elf_phdr);
1535     if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
1536         phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
1537     } else {
1538         phdr = (struct elf_phdr *) alloca(i);
1539         retval = pread(image_fd, phdr, i, ehdr->e_phoff);
1540         if (retval != i) {
1541             goto exit_read;
1542         }
1543     }
1544     bswap_phdr(phdr, ehdr->e_phnum);
1545 
1546 #ifdef CONFIG_USE_FDPIC
1547     info->nsegs = 0;
1548     info->pt_dynamic_addr = 0;
1549 #endif
1550 
1551     /* Find the maximum size of the image and allocate an appropriate
1552        amount of memory to handle that.  */
1553     loaddr = -1, hiaddr = 0;
1554     for (i = 0; i < ehdr->e_phnum; ++i) {
1555         if (phdr[i].p_type == PT_LOAD) {
1556             abi_ulong a = phdr[i].p_vaddr;
1557             if (a < loaddr) {
1558                 loaddr = a;
1559             }
1560             a += phdr[i].p_memsz;
1561             if (a > hiaddr) {
1562                 hiaddr = a;
1563             }
1564 #ifdef CONFIG_USE_FDPIC
1565             ++info->nsegs;
1566 #endif
1567         }
1568     }
1569 
1570     load_addr = loaddr;
1571     if (ehdr->e_type == ET_DYN) {
1572         /* The image indicates that it can be loaded anywhere.  Find a
1573            location that can hold the memory space required.  If the
1574            image is pre-linked, LOADDR will be non-zero.  Since we do
1575            not supply MAP_FIXED here we'll use that address if and
1576            only if it remains available.  */
1577         load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
1578                                 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
1579                                 -1, 0);
1580         if (load_addr == -1) {
1581             goto exit_perror;
1582         }
1583     } else if (pinterp_name != NULL) {
1584         /* This is the main executable.  Make sure that the low
1585            address does not conflict with MMAP_MIN_ADDR or the
1586            QEMU application itself.  */
1587         probe_guest_base(image_name, loaddr, hiaddr);
1588     }
1589     load_bias = load_addr - loaddr;
1590 
1591 #ifdef CONFIG_USE_FDPIC
1592     {
1593         struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
1594             g_malloc(sizeof(*loadsegs) * info->nsegs);
1595 
1596         for (i = 0; i < ehdr->e_phnum; ++i) {
1597             switch (phdr[i].p_type) {
1598             case PT_DYNAMIC:
1599                 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
1600                 break;
1601             case PT_LOAD:
1602                 loadsegs->addr = phdr[i].p_vaddr + load_bias;
1603                 loadsegs->p_vaddr = phdr[i].p_vaddr;
1604                 loadsegs->p_memsz = phdr[i].p_memsz;
1605                 ++loadsegs;
1606                 break;
1607             }
1608         }
1609     }
1610 #endif
1611 
1612     info->load_bias = load_bias;
1613     info->load_addr = load_addr;
1614     info->entry = ehdr->e_entry + load_bias;
1615     info->start_code = -1;
1616     info->end_code = 0;
1617     info->start_data = -1;
1618     info->end_data = 0;
1619     info->brk = 0;
1620     info->elf_flags = ehdr->e_flags;
1621 
1622     for (i = 0; i < ehdr->e_phnum; i++) {
1623         struct elf_phdr *eppnt = phdr + i;
1624         if (eppnt->p_type == PT_LOAD) {
1625             abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
1626             int elf_prot = 0;
1627 
1628             if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1629             if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1630             if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1631 
1632             vaddr = load_bias + eppnt->p_vaddr;
1633             vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
1634             vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
1635 
1636             error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
1637                                 elf_prot, MAP_PRIVATE | MAP_FIXED,
1638                                 image_fd, eppnt->p_offset - vaddr_po);
1639             if (error == -1) {
1640                 goto exit_perror;
1641             }
1642 
1643             vaddr_ef = vaddr + eppnt->p_filesz;
1644             vaddr_em = vaddr + eppnt->p_memsz;
1645 
1646             /* If the load segment requests extra zeros (e.g. bss), map it.  */
1647             if (vaddr_ef < vaddr_em) {
1648                 zero_bss(vaddr_ef, vaddr_em, elf_prot);
1649             }
1650 
1651             /* Find the full program boundaries.  */
1652             if (elf_prot & PROT_EXEC) {
1653                 if (vaddr < info->start_code) {
1654                     info->start_code = vaddr;
1655                 }
1656                 if (vaddr_ef > info->end_code) {
1657                     info->end_code = vaddr_ef;
1658                 }
1659             }
1660             if (elf_prot & PROT_WRITE) {
1661                 if (vaddr < info->start_data) {
1662                     info->start_data = vaddr;
1663                 }
1664                 if (vaddr_ef > info->end_data) {
1665                     info->end_data = vaddr_ef;
1666                 }
1667                 if (vaddr_em > info->brk) {
1668                     info->brk = vaddr_em;
1669                 }
1670             }
1671         } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
1672             char *interp_name;
1673 
1674             if (*pinterp_name) {
1675                 errmsg = "Multiple PT_INTERP entries";
1676                 goto exit_errmsg;
1677             }
1678             interp_name = malloc(eppnt->p_filesz);
1679             if (!interp_name) {
1680                 goto exit_perror;
1681             }
1682 
1683             if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
1684                 memcpy(interp_name, bprm_buf + eppnt->p_offset,
1685                        eppnt->p_filesz);
1686             } else {
1687                 retval = pread(image_fd, interp_name, eppnt->p_filesz,
1688                                eppnt->p_offset);
1689                 if (retval != eppnt->p_filesz) {
1690                     goto exit_perror;
1691                 }
1692             }
1693             if (interp_name[eppnt->p_filesz - 1] != 0) {
1694                 errmsg = "Invalid PT_INTERP entry";
1695                 goto exit_errmsg;
1696             }
1697             *pinterp_name = interp_name;
1698         }
1699     }
1700 
1701     if (info->end_data == 0) {
1702         info->start_data = info->end_code;
1703         info->end_data = info->end_code;
1704         info->brk = info->end_code;
1705     }
1706 
1707     if (qemu_log_enabled()) {
1708         load_symbols(ehdr, image_fd, load_bias);
1709     }
1710 
1711     close(image_fd);
1712     return;
1713 
1714  exit_read:
1715     if (retval >= 0) {
1716         errmsg = "Incomplete read of file header";
1717         goto exit_errmsg;
1718     }
1719  exit_perror:
1720     errmsg = strerror(errno);
1721  exit_errmsg:
1722     fprintf(stderr, "%s: %s\n", image_name, errmsg);
1723     exit(-1);
1724 }
1725 
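/* Load the ELF image of the interpreter named by PT_INTERP (usually
   the dynamic linker) into INFO, reusing BPRM_BUF as a scratch buffer
   for the first BPRM_BUF_SIZE bytes of the file.  */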
1726 static void load_elf_interp(const char *filename, struct image_info *info,
1727                             char bprm_buf[BPRM_BUF_SIZE])
1728 {
1729     int fd, retval;
1730 
1731     fd = open(path(filename), O_RDONLY);
1732     if (fd < 0) {
1733         goto exit_perror;
1734     }
1735 
1736     retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
1737     if (retval < 0) {
1738         goto exit_perror;
1739     }
1740     if (retval < BPRM_BUF_SIZE) {
1741         memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
1742     }
1743 
1744     load_elf_image(filename, fd, info, NULL, bprm_buf);
1745     return;
1746 
1747  exit_perror:
1748     fprintf(stderr, "%s: %s\n", filename, strerror(errno));
1749     exit(-1);
1750 }
1751 
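/* bsearch comparator used by lookup_symbolxx: the key is a guest
   address and each element is a symbol treated as the half-open
   range [st_value, st_value + st_size), so the search returns the
   symbol containing that address, if any.  */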
1752 static int symfind(const void *s0, const void *s1)
1753 {
1754     target_ulong addr = *(target_ulong *)s0;
1755     struct elf_sym *sym = (struct elf_sym *)s1;
1756     int result = 0;
1757     if (addr < sym->st_value) {
1758         result = -1;
1759     } else if (addr >= sym->st_value + sym->st_size) {
1760         result = 1;
1761     }
1762     return result;
1763 }
1764 
1765 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1766 {
1767 #if ELF_CLASS == ELFCLASS32
1768     struct elf_sym *syms = s->disas_symtab.elf32;
1769 #else
1770     struct elf_sym *syms = s->disas_symtab.elf64;
1771 #endif
1772 
1773     /* binary search */
1774     struct elf_sym *sym;
1775 
1776     sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
1777     if (sym != NULL) {
1778         return s->disas_strtab + sym->st_name;
1779     }
1780 
1781     return "";
1782 }
1783 
1784 /* FIXME: This should use elf_ops.h  */
1785 static int symcmp(const void *s0, const void *s1)
1786 {
1787     struct elf_sym *sym0 = (struct elf_sym *)s0;
1788     struct elf_sym *sym1 = (struct elf_sym *)s1;
1789     return (sym0->st_value < sym1->st_value)
1790         ? -1
1791         : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1792 }
1793 
1794 /* Best attempt to load symbols from this ELF object. */
1795 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
1796 {
1797     int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
1798     struct elf_shdr *shdr;
1799     char *strings = NULL;
1800     struct syminfo *s = NULL;
1801     struct elf_sym *new_syms, *syms = NULL;
1802 
1803     shnum = hdr->e_shnum;
1804     i = shnum * sizeof(struct elf_shdr);
1805     shdr = (struct elf_shdr *)alloca(i);
1806     if (pread(fd, shdr, i, hdr->e_shoff) != i) {
1807         return;
1808     }
1809 
1810     bswap_shdr(shdr, shnum);
1811     for (i = 0; i < shnum; ++i) {
1812         if (shdr[i].sh_type == SHT_SYMTAB) {
1813             sym_idx = i;
1814             str_idx = shdr[i].sh_link;
1815             goto found;
1816         }
1817     }
1818 
1819     /* There will be no symbol table if the file was stripped.  */
1820     return;
1821 
1822  found:
1823     /* Now we know where the strtab and symtab are.  Snarf them.  */
1824     s = malloc(sizeof(*s));
1825     if (!s) {
1826         goto give_up;
1827     }
1828 
1829     i = shdr[str_idx].sh_size;
1830     s->disas_strtab = strings = malloc(i);
1831     if (!strings || pread(fd, strings, i, shdr[str_idx].sh_offset) != i) {
1832         goto give_up;
1833     }
1834 
1835     i = shdr[sym_idx].sh_size;
1836     syms = malloc(i);
1837     if (!syms || pread(fd, syms, i, shdr[sym_idx].sh_offset) != i) {
1838         goto give_up;
1839     }
1840 
1841     nsyms = i / sizeof(struct elf_sym);
1842     for (i = 0; i < nsyms; ) {
1843         bswap_sym(syms + i);
1844         /* Throw away entries which we do not need.  */
1845         if (syms[i].st_shndx == SHN_UNDEF
1846             || syms[i].st_shndx >= SHN_LORESERVE
1847             || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1848             if (i < --nsyms) {
1849                 syms[i] = syms[nsyms];
1850             }
1851         } else {
1852 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1853             /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
1854             syms[i].st_value &= ~(target_ulong)1;
1855 #endif
1856             syms[i].st_value += load_bias;
1857             i++;
1858         }
1859     }
1860 
1861     /* No "useful" symbol.  */
1862     if (nsyms == 0) {
1863         goto give_up;
1864     }
1865 
1866     /* Attempt to free the storage associated with the local symbols
1867        that we threw away.  Whether or not this has any effect on the
1868        memory allocation depends on the malloc implementation and how
1869        many symbols we managed to discard.  */
1870     new_syms = realloc(syms, nsyms * sizeof(*syms));
1871     if (new_syms == NULL) {
1872         goto give_up;
1873     }
1874     syms = new_syms;
1875 
1876     qsort(syms, nsyms, sizeof(*syms), symcmp);
1877 
1878     s->disas_num_syms = nsyms;
1879 #if ELF_CLASS == ELFCLASS32
1880     s->disas_symtab.elf32 = syms;
1881 #else
1882     s->disas_symtab.elf64 = syms;
1883 #endif
1884     s->lookup_symbol = lookup_symbolxx;
1885     s->next = syminfos;
1886     syminfos = s;
1887 
1888     return;
1889 
1890 give_up:
1891     free(s);
1892     free(strings);
1893     free(syms);
1894 }
1895 
1896 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1897                     struct image_info * info)
1898 {
1899     struct image_info interp_info;
1900     struct elfhdr elf_ex;
1901     char *elf_interpreter = NULL;
1902 
1903     info->start_mmap = (abi_ulong)ELF_START_MMAP;
1904     info->mmap = 0;
1905     info->rss = 0;
1906 
1907     load_elf_image(bprm->filename, bprm->fd, info,
1908                    &elf_interpreter, bprm->buf);
1909 
1910     /* ??? We need a copy of the elf header for passing to create_elf_tables.
1911        If we do nothing, we'll have overwritten this when we re-use bprm->buf
1912        when we load the interpreter.  */
1913     elf_ex = *(struct elfhdr *)bprm->buf;
1914 
1915     bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1916     bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
1917     bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
1918     if (!bprm->p) {
1919         fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
1920         exit(-1);
1921     }
1922 
1923     /* Do this so that we can load the interpreter, if need be.  We will
1924        change some of these later */
1925     bprm->p = setup_arg_pages(bprm->p, bprm, info);
1926 
1927     if (elf_interpreter) {
1928         load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
1929 
1930         /* If the program interpreter is one of these two, then assume
1931            an iBCS2 image.  Otherwise assume a native linux image.  */
1932 
1933         if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
1934             || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
1935             info->personality = PER_SVR4;
1936 
1937             /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
1938                and some applications "depend" upon this behavior.  Since
1939                we do not have the power to recompile these, we emulate
1940                the SVr4 behavior.  Sigh.  */
1941             target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1942                         MAP_FIXED | MAP_PRIVATE, -1, 0);
1943         }
1944     }
1945 
1946     bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
1947                                 info, (elf_interpreter ? &interp_info : NULL));
1948     info->start_stack = bprm->p;
1949 
1950     /* If we have an interpreter, set that as the program's entry point.
1951        Copy the load_bias as well, to help PPC64 interpret the entry
1952        point as a function descriptor.  Do this after creating elf tables
1953        so that we copy the original program entry point into the AUXV.  */
1954     if (elf_interpreter) {
1955         info->load_bias = interp_info.load_bias;
1956         info->entry = interp_info.entry;
1957         free(elf_interpreter);
1958     }
1959 
1960 #ifdef USE_ELF_CORE_DUMP
1961     bprm->core_dump = &elf_core_dump;
1962 #endif
1963 
1964     return 0;
1965 }
1966 
1967 #ifdef USE_ELF_CORE_DUMP
1968 /*
1969  * Definitions to generate Intel SVR4-like core files.
1970  * These mostly have the same names as the SVR4 types with "target_elf_"
1971  * tacked on the front to prevent clashes with linux definitions,
1972  * and the typedef forms have been avoided.  This is mostly like
1973  * the SVR4 structure, but more Linuxy, with things that Linux does
1974  * not support and which gdb doesn't really use excluded.
1975  *
1976  * Fields we don't dump (their contents are zero) in linux-user qemu
1977  * are marked with XXX.
1978  *
1979  * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1980  *
1981  * Porting ELF coredump support to a new target is a (quite) simple
1982  * process.  First you define USE_ELF_CORE_DUMP in the target's ELF code
1983  * (where init_thread() for the target resides):
1984  *
1985  * #define USE_ELF_CORE_DUMP
1986  *
1987  * Next you define the type of the register set used for dumping.  The ELF
1988  * specification says it needs to be an array of elf_greg_t of size ELF_NREG.
1989  *
1990  * typedef <target_regtype> target_elf_greg_t;
1991  * #define ELF_NREG <number of registers>
1992  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1993  *
1994  * The last step is to implement a target-specific function that copies
1995  * registers from the given CPU into that register set.  The prototype is:
1996  *
1997  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1998  *                                const CPUArchState *env);
1999  *
2000  * Parameters:
2001  *     regs - copy register values into here (allocated and zeroed by caller)
2002  *     env - copy registers from here
2003  *
2004  * An example for the ARM target is provided in this file.
2005  */
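
/*
 * Illustrative sketch only (never compiled): roughly what the three
 * pieces described above could look like for a hypothetical target
 * with 16 general-purpose registers followed by a program counter.
 * The register names and count are assumptions for the example, not
 * any real target's layout; see the ARM code in this file for a real
 * instance.
 */
#if 0
typedef target_ulong target_elf_greg_t;
#define ELF_NREG 17
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUArchState *env)
{
    int i;

    /* Byte-swap each register into the note as the target expects.  */
    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapl(env->regs[i]);
    }
    (*regs)[16] = tswapl(env->pc);
}
#endif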
2006 
2007 /* An ELF note in memory */
2008 struct memelfnote {
2009     const char *name;
2010     size_t     namesz;
2011     size_t     namesz_rounded;
2012     int        type;
2013     size_t     datasz;
2014     size_t     datasz_rounded;
2015     void       *data;
2016     size_t     notesz;
2017 };
2018 
2019 struct target_elf_siginfo {
2020     target_int  si_signo; /* signal number */
2021     target_int  si_code;  /* extra code */
2022     target_int  si_errno; /* errno */
2023 };
2024 
2025 struct target_elf_prstatus {
2026     struct target_elf_siginfo pr_info;      /* Info associated with signal */
2027     target_short       pr_cursig;    /* Current signal */
2028     target_ulong       pr_sigpend;   /* XXX */
2029     target_ulong       pr_sighold;   /* XXX */
2030     target_pid_t       pr_pid;
2031     target_pid_t       pr_ppid;
2032     target_pid_t       pr_pgrp;
2033     target_pid_t       pr_sid;
2034     struct target_timeval pr_utime;  /* XXX User time */
2035     struct target_timeval pr_stime;  /* XXX System time */
2036     struct target_timeval pr_cutime; /* XXX Cumulative user time */
2037     struct target_timeval pr_cstime; /* XXX Cumulative system time */
2038     target_elf_gregset_t      pr_reg;       /* GP registers */
2039     target_int         pr_fpvalid;   /* XXX */
2040 };
2041 
2042 #define ELF_PRARGSZ     (80) /* Number of chars for args */
2043 
2044 struct target_elf_prpsinfo {
2045     char         pr_state;       /* numeric process state */
2046     char         pr_sname;       /* char for pr_state */
2047     char         pr_zomb;        /* zombie */
2048     char         pr_nice;        /* nice val */
2049     target_ulong pr_flag;        /* flags */
2050     target_uid_t pr_uid;
2051     target_gid_t pr_gid;
2052     target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
2053     /* Lots missing */
2054     char    pr_fname[16];           /* filename of executable */
2055     char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
2056 };
2057 
2058 /* Here is the structure in which status of each thread is captured. */
2059 struct elf_thread_status {
2060     QTAILQ_ENTRY(elf_thread_status)  ets_link;
2061     struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
2062 #if 0
2063     elf_fpregset_t fpu;             /* NT_PRFPREG */
2064     struct task_struct *thread;
2065     elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
2066 #endif
2067     struct memelfnote notes[1];
2068     int num_notes;
2069 };
2070 
2071 struct elf_note_info {
2072     struct memelfnote   *notes;
2073     struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
2074     struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
2075 
2076     QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
2077 #if 0
2078     /*
2079      * Current version of ELF coredump doesn't support
2080      * dumping fp regs etc.
2081      */
2082     elf_fpregset_t *fpu;
2083     elf_fpxregset_t *xfpu;
2084     int thread_status_size;
2085 #endif
2086     int notes_size;
2087     int numnote;
2088 };
2089 
2090 struct vm_area_struct {
2091     abi_ulong   vma_start;  /* start vaddr of memory region */
2092     abi_ulong   vma_end;    /* end vaddr of memory region */
2093     abi_ulong   vma_flags;  /* protection etc. flags for the region */
2094     QTAILQ_ENTRY(vm_area_struct) vma_link;
2095 };
2096 
2097 struct mm_struct {
2098     QTAILQ_HEAD(, vm_area_struct) mm_mmap;
2099     int mm_count;           /* number of mappings */
2100 };
2101 
2102 static struct mm_struct *vma_init(void);
2103 static void vma_delete(struct mm_struct *);
2104 static int vma_add_mapping(struct mm_struct *, abi_ulong,
2105                            abi_ulong, abi_ulong);
2106 static int vma_get_mapping_count(const struct mm_struct *);
2107 static struct vm_area_struct *vma_first(const struct mm_struct *);
2108 static struct vm_area_struct *vma_next(struct vm_area_struct *);
2109 static abi_ulong vma_dump_size(const struct vm_area_struct *);
2110 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2111                       unsigned long flags);
2112 
2113 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
2114 static void fill_note(struct memelfnote *, const char *, int,
2115                       unsigned int, void *);
2116 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
2117 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
2118 static void fill_auxv_note(struct memelfnote *, const TaskState *);
2119 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
2120 static size_t note_size(const struct memelfnote *);
2121 static void free_note_info(struct elf_note_info *);
2122 static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
2123 static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
2124 static int core_dump_filename(const TaskState *, char *, size_t);
2125 
2126 static int dump_write(int, const void *, size_t);
2127 static int write_note(struct memelfnote *, int);
2128 static int write_note_info(struct elf_note_info *, int);
2129 
2130 #ifdef BSWAP_NEEDED
2131 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
2132 {
2133     prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
2134     prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
2135     prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
2136     prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
2137     prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
2138     prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
2139     prstatus->pr_pid = tswap32(prstatus->pr_pid);
2140     prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
2141     prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
2142     prstatus->pr_sid = tswap32(prstatus->pr_sid);
2143     /* cpu times are not filled, so we skip them */
2144     /* regs should be in correct format already */
2145     prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
2146 }
2147 
2148 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
2149 {
2150     psinfo->pr_flag = tswapl(psinfo->pr_flag);
2151     psinfo->pr_uid = tswap16(psinfo->pr_uid);
2152     psinfo->pr_gid = tswap16(psinfo->pr_gid);
2153     psinfo->pr_pid = tswap32(psinfo->pr_pid);
2154     psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
2155     psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
2156     psinfo->pr_sid = tswap32(psinfo->pr_sid);
2157 }
2158 
2159 static void bswap_note(struct elf_note *en)
2160 {
2161     bswap32s(&en->n_namesz);
2162     bswap32s(&en->n_descsz);
2163     bswap32s(&en->n_type);
2164 }
2165 #else
2166 static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
2167 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
2168 static inline void bswap_note(struct elf_note *en) { }
2169 #endif /* BSWAP_NEEDED */
2170 
2171 /*
2172  * Minimal support for linux memory regions.  These are needed
2173  * when we are finding out exactly what memory belongs to the
2174  * emulated process.  No locks are needed here, as long as the
2175  * thread that received the signal is stopped.
2176  */
2177 
2178 static struct mm_struct *vma_init(void)
2179 {
2180     struct mm_struct *mm;
2181 
2182     if ((mm = g_malloc(sizeof (*mm))) == NULL)
2183         return (NULL);
2184 
2185     mm->mm_count = 0;
2186     QTAILQ_INIT(&mm->mm_mmap);
2187 
2188     return (mm);
2189 }
2190 
2191 static void vma_delete(struct mm_struct *mm)
2192 {
2193     struct vm_area_struct *vma;
2194 
2195     while ((vma = vma_first(mm)) != NULL) {
2196         QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2197         g_free(vma);
2198     }
2199     g_free(mm);
2200 }
2201 
2202 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2203                            abi_ulong end, abi_ulong flags)
2204 {
2205     struct vm_area_struct *vma;
2206 
2207     if ((vma = g_malloc0(sizeof (*vma))) == NULL)
2208         return (-1);
2209 
2210     vma->vma_start = start;
2211     vma->vma_end = end;
2212     vma->vma_flags = flags;
2213 
2214     QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2215     mm->mm_count++;
2216 
2217     return (0);
2218 }
2219 
2220 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2221 {
2222     return (QTAILQ_FIRST(&mm->mm_mmap));
2223 }
2224 
2225 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2226 {
2227     return (QTAILQ_NEXT(vma, vma_link));
2228 }
2229 
2230 static int vma_get_mapping_count(const struct mm_struct *mm)
2231 {
2232     return (mm->mm_count);
2233 }
2234 
2235 /*
2236  * Calculate file (dump) size of given memory region.
2237  */
2238 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2239 {
2240     /* if we cannot even read the first page, skip it */
2241     if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2242         return (0);
2243 
2244     /*
2245      * Usually we don't dump executable pages, as they contain
2246      * non-writable code that the debugger can read directly from
2247      * the target library etc.  However, thread stacks are also
2248      * marked executable, so we read in the first page of the
2249      * region and check whether it contains an ELF header.  If
2250      * there is no ELF header, we dump the region.
2251      */
2252     if (vma->vma_flags & PROT_EXEC) {
2253         char page[TARGET_PAGE_SIZE];
2254 
2255         copy_from_user(page, vma->vma_start, sizeof (page));
2256         if ((page[EI_MAG0] == ELFMAG0) &&
2257             (page[EI_MAG1] == ELFMAG1) &&
2258             (page[EI_MAG2] == ELFMAG2) &&
2259             (page[EI_MAG3] == ELFMAG3)) {
2260             /*
2261              * The mapping probably comes from an ELF binary.
2262              * Don't dump it.
2263              */
2264             return (0);
2265         }
2266     }
2267 
2268     return (vma->vma_end - vma->vma_start);
2269 }
2270 
2271 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2272                       unsigned long flags)
2273 {
2274     struct mm_struct *mm = (struct mm_struct *)priv;
2275 
2276     vma_add_mapping(mm, start, end, flags);
2277     return (0);
2278 }
2279 
2280 static void fill_note(struct memelfnote *note, const char *name, int type,
2281                       unsigned int sz, void *data)
2282 {
2283     unsigned int namesz;
2284 
2285     namesz = strlen(name) + 1;
2286     note->name = name;
2287     note->namesz = namesz;
2288     note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2289     note->type = type;
2290     note->datasz = sz;
2291     note->datasz_rounded = roundup(sz, sizeof (int32_t));
2292 
2293     note->data = data;
2294 
2295     /*
2296      * We calculate the rounded-up note size here, as specified by
2297      * the ELF specification.
2298      */
2299     note->notesz = sizeof (struct elf_note) +
2300         note->namesz_rounded + note->datasz_rounded;
2301 }
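
/*
 * Worked example (hypothetical numbers, for illustration only): the
 * name "CORE" occupies 5 bytes including its NUL, which rounds up to
 * 8, and a 13-byte descriptor rounds up to 16, so the note written by
 * write_note() below occupies sizeof(struct elf_note) + 8 + 16 bytes.
 */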
2302 
2303 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2304                             uint32_t flags)
2305 {
2306     (void) memset(elf, 0, sizeof(*elf));
2307 
2308     (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2309     elf->e_ident[EI_CLASS] = ELF_CLASS;
2310     elf->e_ident[EI_DATA] = ELF_DATA;
2311     elf->e_ident[EI_VERSION] = EV_CURRENT;
2312     elf->e_ident[EI_OSABI] = ELF_OSABI;
2313 
2314     elf->e_type = ET_CORE;
2315     elf->e_machine = machine;
2316     elf->e_version = EV_CURRENT;
2317     elf->e_phoff = sizeof(struct elfhdr);
2318     elf->e_flags = flags;
2319     elf->e_ehsize = sizeof(struct elfhdr);
2320     elf->e_phentsize = sizeof(struct elf_phdr);
2321     elf->e_phnum = segs;
2322 
2323     bswap_ehdr(elf);
2324 }
2325 
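/* Describe the PT_NOTE segment: it has no memory image (p_memsz == 0)
   and simply covers sz bytes of the file starting at offset.  */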
2326 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2327 {
2328     phdr->p_type = PT_NOTE;
2329     phdr->p_offset = offset;
2330     phdr->p_vaddr = 0;
2331     phdr->p_paddr = 0;
2332     phdr->p_filesz = sz;
2333     phdr->p_memsz = 0;
2334     phdr->p_flags = 0;
2335     phdr->p_align = 0;
2336 
2337     bswap_phdr(phdr, 1);
2338 }
2339 
2340 static size_t note_size(const struct memelfnote *note)
2341 {
2342     return (note->notesz);
2343 }
2344 
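/* Fill in the NT_PRSTATUS data for one task: the signal that caused
   the dump plus the thread/process ids, byte-swapped for the target.
   Registers are copied in separately by elf_core_copy_regs().  */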
2345 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2346                           const TaskState *ts, int signr)
2347 {
2348     (void) memset(prstatus, 0, sizeof (*prstatus));
2349     prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2350     prstatus->pr_pid = ts->ts_tid;
2351     prstatus->pr_ppid = getppid();
2352     prstatus->pr_pgrp = getpgrp();
2353     prstatus->pr_sid = getsid(0);
2354 
2355     bswap_prstatus(prstatus);
2356 }
2357 
2358 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2359 {
2360     char *filename, *base_filename;
2361     unsigned int i, len;
2362 
2363     (void) memset(psinfo, 0, sizeof (*psinfo));
2364 
2365     len = ts->info->arg_end - ts->info->arg_start;
2366     if (len >= ELF_PRARGSZ)
2367         len = ELF_PRARGSZ - 1;
2368     if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2369         return -EFAULT;
2370     for (i = 0; i < len; i++)
2371         if (psinfo->pr_psargs[i] == 0)
2372             psinfo->pr_psargs[i] = ' ';
2373     psinfo->pr_psargs[len] = 0;
2374 
2375     psinfo->pr_pid = getpid();
2376     psinfo->pr_ppid = getppid();
2377     psinfo->pr_pgrp = getpgrp();
2378     psinfo->pr_sid = getsid(0);
2379     psinfo->pr_uid = getuid();
2380     psinfo->pr_gid = getgid();
2381 
2382     filename = strdup(ts->bprm->filename);
2383     base_filename = strdup(basename(filename));
2384     (void) strncpy(psinfo->pr_fname, base_filename,
2385                    sizeof(psinfo->pr_fname));
2386     free(base_filename);
2387     free(filename);
2388 
2389     bswap_psinfo(psinfo);
2390     return (0);
2391 }
2392 
2393 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2394 {
2395     elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2396     elf_addr_t orig_auxv = auxv;
2397     void *ptr;
2398     int len = ts->info->auxv_len;
2399 
2400     /*
2401      * The auxiliary vector is stored on the target process's stack.  It
2402      * contains {type, value} pairs that we need to dump into the note.  This
2403      * is not strictly necessary, but we do it for the sake of completeness.
2404      */
2405 
2406     /* read in the whole auxv vector and copy it into the memelfnote */
2407     ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2408     if (ptr != NULL) {
2409         fill_note(note, "CORE", NT_AUXV, len, ptr);
2410         unlock_user(ptr, auxv, len);
2411     }
2412 }
2413 
2414 /*
2415  * Constructs the name of the coredump file.  We use the following
2416  * convention for the name:
2417  *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2418  *
2419  * Returns 0 in case of success, -1 otherwise (errno is set).
2420  */
2421 static int core_dump_filename(const TaskState *ts, char *buf,
2422                               size_t bufsize)
2423 {
2424     char timestamp[64];
2425     char *filename = NULL;
2426     char *base_filename = NULL;
2427     struct timeval tv;
2428     struct tm tm;
2429 
2430     assert(bufsize >= PATH_MAX);
2431 
2432     if (gettimeofday(&tv, NULL) < 0) {
2433         (void) fprintf(stderr, "unable to get current timestamp: %s",
2434                        strerror(errno));
2435         return (-1);
2436     }
2437 
2438     filename = strdup(ts->bprm->filename);
2439     base_filename = strdup(basename(filename));
2440     (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2441                     localtime_r(&tv.tv_sec, &tm));
2442     (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2443                     base_filename, timestamp, (int)getpid());
2444     free(base_filename);
2445     free(filename);
2446 
2447     return (0);
2448 }
2449 
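/* Write size bytes starting at ptr to fd, honouring RLIMIT_CORE for
   seekable files and retrying short writes.  Returns 0 on success,
   -1 on failure.  */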
2450 static int dump_write(int fd, const void *ptr, size_t size)
2451 {
2452     const char *bufp = (const char *)ptr;
2453     ssize_t bytes_written, bytes_left;
2454     struct rlimit dumpsize;
2455     off_t pos;
2456 
2457     bytes_written = 0;
2458     getrlimit(RLIMIT_CORE, &dumpsize);
2459     if ((pos = lseek(fd, 0, SEEK_CUR))==-1) {
2460         if (errno == ESPIPE) { /* not a seekable stream */
2461             bytes_left = size;
2462         } else {
2463             return pos;
2464         }
2465     } else {
2466         if (dumpsize.rlim_cur <= pos) {
2467             return -1;
2468         } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2469             bytes_left = size;
2470         } else {
2471             size_t limit_left = dumpsize.rlim_cur - pos;
2472             bytes_left = limit_left >= size ? size : limit_left;
2473         }
2474     }
2475 
2476     /*
2477      * Under normal conditions a single write(2) should do, but in
2478      * the case of a socket etc. this loop is more portable.
2479      */
2480     do {
2481         bytes_written = write(fd, bufp, bytes_left);
2482         if (bytes_written < 0) {
2483             if (errno == EINTR)
2484                 continue;
2485             return (-1);
2486         } else if (bytes_written == 0) { /* eof */
2487             return (-1);
2488         }
2489         bufp += bytes_written;
2490         bytes_left -= bytes_written;
2491     } while (bytes_left > 0);
2492 
2493     return (0);
2494 }
2495 
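/* Write a single note to fd: the fixed elf_note header followed by
   the name and the descriptor data, each padded to a 4-byte boundary.  */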
2496 static int write_note(struct memelfnote *men, int fd)
2497 {
2498     struct elf_note en;
2499 
2500     en.n_namesz = men->namesz;
2501     en.n_type = men->type;
2502     en.n_descsz = men->datasz;
2503 
2504     bswap_note(&en);
2505 
2506     if (dump_write(fd, &en, sizeof(en)) != 0)
2507         return (-1);
2508     if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2509         return (-1);
2510     if (dump_write(fd, men->data, men->datasz_rounded) != 0)
2511         return (-1);
2512 
2513     return (0);
2514 }
2515 
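/* Record an NT_PRSTATUS note (registers and signal state) for one
   non-current thread, queue it on the thread list and account for
   its size in notes_size.  */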
2516 static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
2517 {
2518     TaskState *ts = (TaskState *)env->opaque;
2519     struct elf_thread_status *ets;
2520 
2521     ets = g_malloc0(sizeof (*ets));
2522     ets->num_notes = 1; /* only prstatus is dumped */
2523     fill_prstatus(&ets->prstatus, ts, 0);
2524     elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2525     fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2526               &ets->prstatus);
2527 
2528     QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2529 
2530     info->notes_size += note_size(&ets->notes[0]);
2531 }
2532 
2533 static int fill_note_info(struct elf_note_info *info,
2534                           long signr, const CPUArchState *env)
2535 {
2536 #define NUMNOTES 3
2537     CPUArchState *cpu = NULL;
2538     TaskState *ts = (TaskState *)env->opaque;
2539     int i;
2540 
2541     (void) memset(info, 0, sizeof (*info));
2542 
2543     QTAILQ_INIT(&info->thread_list);
2544 
2545     info->notes = g_malloc0(NUMNOTES * sizeof (struct memelfnote));
2546     if (info->notes == NULL)
2547         return (-ENOMEM);
2548     info->prstatus = g_malloc0(sizeof (*info->prstatus));
2549     if (info->prstatus == NULL)
2550         return (-ENOMEM);
2551     info->psinfo = g_malloc0(sizeof (*info->psinfo));
2552     if (info->psinfo == NULL)
2553         return (-ENOMEM);
2554 
2555     /*
2556      * First fill in status (and registers) of current thread
2557      * including process info & aux vector.
2558      */
2559     fill_prstatus(info->prstatus, ts, signr);
2560     elf_core_copy_regs(&info->prstatus->pr_reg, env);
2561     fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2562               sizeof (*info->prstatus), info->prstatus);
2563     fill_psinfo(info->psinfo, ts);
2564     fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2565               sizeof (*info->psinfo), info->psinfo);
2566     fill_auxv_note(&info->notes[2], ts);
2567     info->numnote = 3;
2568 
2569     info->notes_size = 0;
2570     for (i = 0; i < info->numnote; i++)
2571         info->notes_size += note_size(&info->notes[i]);
2572 
2573     /* read and fill status of all threads */
2574     cpu_list_lock();
2575     for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2576         if (cpu == thread_env)
2577             continue;
2578         fill_thread_info(info, cpu);
2579     }
2580     cpu_list_unlock();
2581 
2582     return (0);
2583 }
2584 
2585 static void free_note_info(struct elf_note_info *info)
2586 {
2587     struct elf_thread_status *ets;
2588 
2589     while (!QTAILQ_EMPTY(&info->thread_list)) {
2590         ets = QTAILQ_FIRST(&info->thread_list);
2591         QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2592         g_free(ets);
2593     }
2594 
2595     g_free(info->prstatus);
2596     g_free(info->psinfo);
2597     g_free(info->notes);
2598 }
2599 
2600 static int write_note_info(struct elf_note_info *info, int fd)
2601 {
2602     struct elf_thread_status *ets;
2603     int i, error = 0;
2604 
2605     /* write prstatus, psinfo and auxv for current thread */
2606     for (i = 0; i < info->numnote; i++)
2607         if ((error = write_note(&info->notes[i], fd)) != 0)
2608             return (error);
2609 
2610     /* write prstatus for each thread */
2611     QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
2613         if ((error = write_note(&ets->notes[0], fd)) != 0)
2614             return (error);
2615     }
2616 
2617     return (0);
2618 }
2619 
2620 /*
2621  * Write out ELF coredump.
2622  *
2623  * See documentation of ELF object file format in:
2624  * http://www.caldera.com/developers/devspecs/gabi41.pdf
2625  *
2626  * The coredump format in Linux is the following:
2627  *
2628  * 0   +----------------------+         \
2629  *     | ELF header           | ET_CORE  |
2630  *     +----------------------+          |
2631  *     | ELF program headers  |          |--- headers
2632  *     | - NOTE section       |          |
2633  *     | - PT_LOAD sections   |          |
2634  *     +----------------------+         /
2635  *     | NOTEs:               |
2636  *     | - NT_PRSTATUS        |
2637  *     | - NT_PRPSINFO        |
2638  *     | - NT_AUXV            |
2639  *     +----------------------+ <-- aligned to target page
2640  *     | Process memory dump  |
2641  *     :                      :
2642  *     .                      .
2643  *     :                      :
2644  *     |                      |
2645  *     +----------------------+
2646  *
2647  * NT_PRSTATUS -> struct elf_prstatus (per thread)
2648  * NT_PRPSINFO -> struct elf_prpsinfo
2649  * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
2650  *
2651  * The format follows the System V format as closely as possible.  Current
2652  * version limitations are as follows:
2653  *     - no floating point registers are dumped
2654  *
2655  * Function returns 0 in case of success, negative errno otherwise.
2656  *
2657  * TODO: make this also work at runtime: it should be possible
2658  * to force a coredump from a running process and then continue
2659  * processing.  For example, qemu could set up a SIGUSR2 handler
2660  * (provided that the target process hasn't registered a handler
2661  * for that signal) that does the dump when the signal is received.
2662  */
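
/*
 * Layout sketch with made-up numbers: with, say, 3 memory mappings
 * (segs == 3) the notes' file data starts right after the headers at
 * sizeof(struct elfhdr) + 4 * sizeof(struct elf_phdr), and the memory
 * dump starts at that offset plus notes_size, rounded up to
 * ELF_EXEC_PAGESIZE, matching the diagram above.
 */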
2663 static int elf_core_dump(int signr, const CPUArchState *env)
2664 {
2665     const TaskState *ts = (const TaskState *)env->opaque;
2666     struct vm_area_struct *vma = NULL;
2667     char corefile[PATH_MAX];
2668     struct elf_note_info info;
2669     struct elfhdr elf;
2670     struct elf_phdr phdr;
2671     struct rlimit dumpsize;
2672     struct mm_struct *mm = NULL;
2673     off_t offset = 0, data_offset = 0;
2674     int segs = 0;
2675     int fd = -1;
2676 
2677     errno = 0;
2678     getrlimit(RLIMIT_CORE, &dumpsize);
2679     if (dumpsize.rlim_cur == 0)
2680         return 0;
2681 
2682     if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2683         return (-errno);
2684 
2685     if ((fd = open(corefile, O_WRONLY | O_CREAT,
2686                    S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2687         return (-errno);
2688 
2689     /*
2690      * Walk through the target process's memory mappings and set
2691      * up a structure containing this information.  After this
2692      * point the vma_xxx functions can be used.
2693      */
2694     if ((mm = vma_init()) == NULL)
2695         goto out;
2696 
2697     walk_memory_regions(mm, vma_walker);
2698     segs = vma_get_mapping_count(mm);
2699 
2700     /*
2701      * Construct a valid coredump ELF header.  We also add
2702      * one more segment for the notes.
2703      */
2704     fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2705     if (dump_write(fd, &elf, sizeof (elf)) != 0)
2706         goto out;
2707 
2708     /* fill in in-memory version of notes */
2709     if (fill_note_info(&info, signr, env) < 0)
2710         goto out;
2711 
2712     offset += sizeof (elf);                             /* elf header */
2713     offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
2714 
2715     /* write out notes program header */
2716     fill_elf_note_phdr(&phdr, info.notes_size, offset);
2717 
2718     offset += info.notes_size;
2719     if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2720         goto out;
2721 
2722     /*
2723      * The ELF specification wants data to start at a page boundary,
2724      * so we align it here.
2725      */
2726     data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2727 
2728     /*
2729      * Write program headers for memory regions mapped in
2730      * the target process.
2731      */
2732     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2733         (void) memset(&phdr, 0, sizeof (phdr));
2734 
2735         phdr.p_type = PT_LOAD;
2736         phdr.p_offset = offset;
2737         phdr.p_vaddr = vma->vma_start;
2738         phdr.p_paddr = 0;
2739         phdr.p_filesz = vma_dump_size(vma);
2740         offset += phdr.p_filesz;
2741         phdr.p_memsz = vma->vma_end - vma->vma_start;
2742         phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2743         if (vma->vma_flags & PROT_WRITE)
2744             phdr.p_flags |= PF_W;
2745         if (vma->vma_flags & PROT_EXEC)
2746             phdr.p_flags |= PF_X;
2747         phdr.p_align = ELF_EXEC_PAGESIZE;
2748 
2749         bswap_phdr(&phdr, 1);
2750         dump_write(fd, &phdr, sizeof (phdr));
2751     }
2752 
2753     /*
2754      * Next we write the notes just after the program headers.  No
2755      * alignment is needed here.
2756      */
2757     if (write_note_info(&info, fd) < 0)
2758         goto out;
2759 
2760     /* align data to page boundary */
2761     if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2762         goto out;
2763 
2764     /*
2765      * Finally we can dump the process memory into the corefile as well.
2766      */
2767     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2768         abi_ulong addr;
2769         abi_ulong end;
2770 
2771         end = vma->vma_start + vma_dump_size(vma);
2772 
2773         for (addr = vma->vma_start; addr < end;
2774              addr += TARGET_PAGE_SIZE) {
2775             char page[TARGET_PAGE_SIZE];
2776             int error;
2777 
2778             /*
2779              *  Read in page from target process memory and
2780              *  write it to coredump file.
2781              */
2782             error = copy_from_user(page, addr, sizeof (page));
2783             if (error != 0) {
2784                 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2785                                addr);
2786                 errno = -error;
2787                 goto out;
2788             }
2789             if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2790                 goto out;
2791         }
2792     }
2793 
2794  out:
2795     free_note_info(&info);
2796     if (mm != NULL)
2797         vma_delete(mm);
2798     (void) close(fd);
2799 
2800     if (errno != 0)
2801         return (-errno);
2802     return (0);
2803 }
2804 #endif /* USE_ELF_CORE_DUMP */
2805 
2806 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2807 {
2808     init_thread(regs, infop);
2809 }
2810