xref: /openbmc/qemu/linux-user/elfload.c (revision 1d300b5f)
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4 
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15 
16 #include "qemu.h"
17 #include "disas/disas.h"
18 
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27 
28 #define ELF_OSABI   ELFOSABI_SYSV
29 
30 /* from personality.h */
31 
32 /*
33  * Flags for bug emulation.
34  *
35  * These occupy the top three bytes.
36  */
37 enum {
38     ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
39     FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
40                                            descriptors (signal handling) */
41     MMAP_PAGE_ZERO =    0x0100000,
42     ADDR_COMPAT_LAYOUT = 0x0200000,
43     READ_IMPLIES_EXEC = 0x0400000,
44     ADDR_LIMIT_32BIT =  0x0800000,
45     SHORT_INODE =       0x1000000,
46     WHOLE_SECONDS =     0x2000000,
47     STICKY_TIMEOUTS =   0x4000000,
48     ADDR_LIMIT_3GB =    0x8000000,
49 };
50 
51 /*
52  * Personality types.
53  *
54  * These go in the low byte.  Avoid using the top bit, it will
55  * conflict with error returns.
56  */
57 enum {
58     PER_LINUX =         0x0000,
59     PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
60     PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
61     PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
62     PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
63     PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
64     PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
65     PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
66     PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
67     PER_BSD =           0x0006,
68     PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
69     PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
70     PER_LINUX32 =       0x0008,
71     PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
72     PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
73     PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
74     PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
75     PER_RISCOS =        0x000c,
76     PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
77     PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
78     PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
79     PER_HPUX =          0x0010,
80     PER_MASK =          0x00ff,
81 };
82 
83 /*
84  * Return the base personality without flags.
85  */
86 #define personality(pers)       (pers & PER_MASK)
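/* For example, personality(PER_LINUX32_3GB) yields PER_LINUX32: the
   ADDR_LIMIT_3GB flag lives outside PER_MASK and is stripped off. */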
87 
88 /* this flag is ineffective under Linux too and should be deleted */
89 #ifndef MAP_DENYWRITE
90 #define MAP_DENYWRITE 0
91 #endif
92 
93 /* should probably go in elf.h */
94 #ifndef ELIBBAD
95 #define ELIBBAD 80
96 #endif
97 
98 #ifdef TARGET_WORDS_BIGENDIAN
99 #define ELF_DATA        ELFDATA2MSB
100 #else
101 #define ELF_DATA        ELFDATA2LSB
102 #endif
103 
104 #ifdef TARGET_ABI_MIPSN32
105 typedef abi_ullong      target_elf_greg_t;
106 #define tswapreg(ptr)   tswap64(ptr)
107 #else
108 typedef abi_ulong       target_elf_greg_t;
109 #define tswapreg(ptr)   tswapal(ptr)
110 #endif
111 
112 #ifdef USE_UID16
113 typedef abi_ushort      target_uid_t;
114 typedef abi_ushort      target_gid_t;
115 #else
116 typedef abi_uint        target_uid_t;
117 typedef abi_uint        target_gid_t;
118 #endif
119 typedef abi_int         target_pid_t;
120 
121 #ifdef TARGET_I386
122 
123 #define ELF_PLATFORM get_elf_platform()
124 
125 static const char *get_elf_platform(void)
126 {
127     static char elf_platform[] = "i386";
128     int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
129     if (family > 6)
130         family = 6;
131     if (family >= 3)
132         elf_platform[1] = '0' + family;
133     return elf_platform;
134 }
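/* For example, a CPU reporting family 6 (or above, clamped to 6) yields
   "i686", family 4 or 5 yields "i486"/"i586", and families below 3 keep
   the default "i386" string. */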
135 
136 #define ELF_HWCAP get_elf_hwcap()
137 
138 static uint32_t get_elf_hwcap(void)
139 {
140     X86CPU *cpu = X86_CPU(thread_cpu);
141 
142     return cpu->env.features[FEAT_1_EDX];
143 }
144 
145 #ifdef TARGET_X86_64
146 #define ELF_START_MMAP 0x2aaaaab000ULL
147 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
148 
149 #define ELF_CLASS      ELFCLASS64
150 #define ELF_ARCH       EM_X86_64
151 
152 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
153 {
154     regs->rax = 0;
155     regs->rsp = infop->start_stack;
156     regs->rip = infop->entry;
157 }
158 
159 #define ELF_NREG    27
160 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
161 
162 /*
163  * Note that ELF_NREG should be 29, as there should be a place for the
164  * TRAPNO and ERR "registers" as well, but Linux doesn't dump
165  * those.
166  *
167  * See linux kernel: arch/x86/include/asm/elf.h
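 *
 * The ordering below appears to follow the x86-64 struct user_regs_struct
 * (r15 ... rdi, orig_rax, rip, cs, eflags, rsp, ss, fs_base, gs_base, ds,
 * es, fs, gs): index 15 stands in for orig_rax (hence the XXX), and
 * indexes 21/22 reuse the FS/GS selectors where the kernel would dump
 * fs_base/gs_base.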
168  */
169 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
170 {
171     (*regs)[0] = env->regs[15];
172     (*regs)[1] = env->regs[14];
173     (*regs)[2] = env->regs[13];
174     (*regs)[3] = env->regs[12];
175     (*regs)[4] = env->regs[R_EBP];
176     (*regs)[5] = env->regs[R_EBX];
177     (*regs)[6] = env->regs[11];
178     (*regs)[7] = env->regs[10];
179     (*regs)[8] = env->regs[9];
180     (*regs)[9] = env->regs[8];
181     (*regs)[10] = env->regs[R_EAX];
182     (*regs)[11] = env->regs[R_ECX];
183     (*regs)[12] = env->regs[R_EDX];
184     (*regs)[13] = env->regs[R_ESI];
185     (*regs)[14] = env->regs[R_EDI];
186     (*regs)[15] = env->regs[R_EAX]; /* XXX */
187     (*regs)[16] = env->eip;
188     (*regs)[17] = env->segs[R_CS].selector & 0xffff;
189     (*regs)[18] = env->eflags;
190     (*regs)[19] = env->regs[R_ESP];
191     (*regs)[20] = env->segs[R_SS].selector & 0xffff;
192     (*regs)[21] = env->segs[R_FS].selector & 0xffff;
193     (*regs)[22] = env->segs[R_GS].selector & 0xffff;
194     (*regs)[23] = env->segs[R_DS].selector & 0xffff;
195     (*regs)[24] = env->segs[R_ES].selector & 0xffff;
196     (*regs)[25] = env->segs[R_FS].selector & 0xffff;
197     (*regs)[26] = env->segs[R_GS].selector & 0xffff;
198 }
199 
200 #else
201 
202 #define ELF_START_MMAP 0x80000000
203 
204 /*
205  * This is used to ensure we don't load something for the wrong architecture.
206  */
207 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
208 
209 /*
210  * These are used to set parameters in the core dumps.
211  */
212 #define ELF_CLASS       ELFCLASS32
213 #define ELF_ARCH        EM_386
214 
215 static inline void init_thread(struct target_pt_regs *regs,
216                                struct image_info *infop)
217 {
218     regs->esp = infop->start_stack;
219     regs->eip = infop->entry;
220 
221     /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
222        starts %edx contains a pointer to a function which might be
223        registered using `atexit'.  This provides a means for the
224        dynamic linker to call DT_FINI functions for shared libraries
225        that have been loaded before the code runs.
226 
227        A value of 0 tells us that we have no such handler.  */
228     regs->edx = 0;
229 }
230 
231 #define ELF_NREG    17
232 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
233 
234 /*
235  * Note that ELF_NREG should be 19, as there should be a place for the
236  * TRAPNO and ERR "registers" as well, but Linux doesn't dump
237  * those.
238  *
239  * See linux kernel: arch/x86/include/asm/elf.h
240  */
241 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
242 {
243     (*regs)[0] = env->regs[R_EBX];
244     (*regs)[1] = env->regs[R_ECX];
245     (*regs)[2] = env->regs[R_EDX];
246     (*regs)[3] = env->regs[R_ESI];
247     (*regs)[4] = env->regs[R_EDI];
248     (*regs)[5] = env->regs[R_EBP];
249     (*regs)[6] = env->regs[R_EAX];
250     (*regs)[7] = env->segs[R_DS].selector & 0xffff;
251     (*regs)[8] = env->segs[R_ES].selector & 0xffff;
252     (*regs)[9] = env->segs[R_FS].selector & 0xffff;
253     (*regs)[10] = env->segs[R_GS].selector & 0xffff;
254     (*regs)[11] = env->regs[R_EAX]; /* XXX */
255     (*regs)[12] = env->eip;
256     (*regs)[13] = env->segs[R_CS].selector & 0xffff;
257     (*regs)[14] = env->eflags;
258     (*regs)[15] = env->regs[R_ESP];
259     (*regs)[16] = env->segs[R_SS].selector & 0xffff;
260 }
261 #endif
262 
263 #define USE_ELF_CORE_DUMP
264 #define ELF_EXEC_PAGESIZE       4096
265 
266 #endif
267 
268 #ifdef TARGET_ARM
269 
270 #define ELF_START_MMAP 0x80000000
271 
272 #define elf_check_arch(x) ( (x) == EM_ARM )
273 
274 #define ELF_CLASS       ELFCLASS32
275 #define ELF_ARCH        EM_ARM
276 
277 static inline void init_thread(struct target_pt_regs *regs,
278                                struct image_info *infop)
279 {
280     abi_long stack = infop->start_stack;
281     memset(regs, 0, sizeof(*regs));
282     regs->ARM_cpsr = 0x10;
283     if (infop->entry & 1)
284         regs->ARM_cpsr |= CPSR_T;
285     regs->ARM_pc = infop->entry & 0xfffffffe;
286     regs->ARM_sp = infop->start_stack;
287     /* FIXME - what to do on failure of get_user()? */
288     get_user_ual(regs->ARM_r2, stack + 8); /* envp */
289     get_user_ual(regs->ARM_r1, stack + 4); /* argv */
290     /* XXX: it seems that r0 is zeroed afterwards anyway */
291     regs->ARM_r0 = 0;
292     /* For uClinux PIC binaries.  */
293     /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
294     regs->ARM_r10 = infop->start_data;
295 }
296 
297 #define ELF_NREG    18
298 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
299 
300 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
301 {
302     (*regs)[0] = tswapreg(env->regs[0]);
303     (*regs)[1] = tswapreg(env->regs[1]);
304     (*regs)[2] = tswapreg(env->regs[2]);
305     (*regs)[3] = tswapreg(env->regs[3]);
306     (*regs)[4] = tswapreg(env->regs[4]);
307     (*regs)[5] = tswapreg(env->regs[5]);
308     (*regs)[6] = tswapreg(env->regs[6]);
309     (*regs)[7] = tswapreg(env->regs[7]);
310     (*regs)[8] = tswapreg(env->regs[8]);
311     (*regs)[9] = tswapreg(env->regs[9]);
312     (*regs)[10] = tswapreg(env->regs[10]);
313     (*regs)[11] = tswapreg(env->regs[11]);
314     (*regs)[12] = tswapreg(env->regs[12]);
315     (*regs)[13] = tswapreg(env->regs[13]);
316     (*regs)[14] = tswapreg(env->regs[14]);
317     (*regs)[15] = tswapreg(env->regs[15]);
318 
319     (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
320     (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
321 }
322 
323 #define USE_ELF_CORE_DUMP
324 #define ELF_EXEC_PAGESIZE       4096
325 
326 enum
327 {
328     ARM_HWCAP_ARM_SWP       = 1 << 0,
329     ARM_HWCAP_ARM_HALF      = 1 << 1,
330     ARM_HWCAP_ARM_THUMB     = 1 << 2,
331     ARM_HWCAP_ARM_26BIT     = 1 << 3,
332     ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
333     ARM_HWCAP_ARM_FPA       = 1 << 5,
334     ARM_HWCAP_ARM_VFP       = 1 << 6,
335     ARM_HWCAP_ARM_EDSP      = 1 << 7,
336     ARM_HWCAP_ARM_JAVA      = 1 << 8,
337     ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
338     ARM_HWCAP_ARM_THUMBEE   = 1 << 10,
339     ARM_HWCAP_ARM_NEON      = 1 << 11,
340     ARM_HWCAP_ARM_VFPv3     = 1 << 12,
341     ARM_HWCAP_ARM_VFPv3D16  = 1 << 13,
342 };
343 
344 #define TARGET_HAS_VALIDATE_GUEST_SPACE
345 /* Return 1 if the proposed guest space is suitable for the guest.
346  * Return 0 if the proposed guest space isn't suitable, but another
347  * address space should be tried.
348  * Return -1 if there is no way the proposed guest space can be
349  * valid regardless of the base.
350  * This check may leave a page mapped and populated if the
351  * address is suitable.
352  */
353 static int validate_guest_space(unsigned long guest_base,
354                                 unsigned long guest_size)
355 {
356     unsigned long real_start, test_page_addr;
357 
358     /* We need to check that we can force a fault on access to the
359      * commpage at 0xffff0fxx
360      */
361     test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask);
362 
363     /* If the commpage lies within the already allocated guest space,
364      * then there is no way we can allocate it.
365      */
366     if (test_page_addr >= guest_base
367         && test_page_addr <= (guest_base + guest_size)) {
368         return -1;
369     }
370 
371     /* Note it needs to be writeable to let us initialise it */
372     real_start = (unsigned long)
373                  mmap((void *)test_page_addr, qemu_host_page_size,
374                      PROT_READ | PROT_WRITE,
375                      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
376 
377     /* If we can't map it then try another address */
378     if (real_start == -1ul) {
379         return 0;
380     }
381 
382     if (real_start != test_page_addr) {
383         /* OS didn't put the page where we asked - unmap and reject */
384         munmap((void *)real_start, qemu_host_page_size);
385         return 0;
386     }
387 
388     /* Leave the page mapped
389      * Populate it (mmap should have left it all 0'd)
390      */
391 
392     /* Kernel helper versions */
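    /* 0xffff0ffc is the __kuser_helper_version word of the ARM commpage.
       Writing 5 advertises the same number of helper slots (cmpxchg64
       through get_tls) that the kernel exposes; the helpers themselves are
       handled elsewhere in the ARM linux-user code by trapping jumps into
       this page. */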
393     __put_user(5, (uint32_t *)g2h(0xffff0ffcul));
394 
395     /* Now it's populated make it RO */
396     if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) {
397         perror("Protecting guest commpage");
398         exit(-1);
399     }
400 
401     return 1; /* All good */
402 }
403 
404 
405 #define ELF_HWCAP get_elf_hwcap()
406 
407 static uint32_t get_elf_hwcap(void)
408 {
409     ARMCPU *cpu = ARM_CPU(thread_cpu);
410     uint32_t hwcaps = 0;
411 
412     hwcaps |= ARM_HWCAP_ARM_SWP;
413     hwcaps |= ARM_HWCAP_ARM_HALF;
414     hwcaps |= ARM_HWCAP_ARM_THUMB;
415     hwcaps |= ARM_HWCAP_ARM_FAST_MULT;
416     hwcaps |= ARM_HWCAP_ARM_FPA;
417 
418     /* probe for the extra features */
419 #define GET_FEATURE(feat, hwcap) \
420     do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
421     GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
422     GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
423     GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
424     GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
425     GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
426     GET_FEATURE(ARM_FEATURE_VFP_FP16, ARM_HWCAP_ARM_VFPv3D16);
427 #undef GET_FEATURE
428 
429     return hwcaps;
430 }
431 
432 #endif
433 
434 #ifdef TARGET_UNICORE32
435 
436 #define ELF_START_MMAP          0x80000000
437 
438 #define elf_check_arch(x)       ((x) == EM_UNICORE32)
439 
440 #define ELF_CLASS               ELFCLASS32
441 #define ELF_DATA                ELFDATA2LSB
442 #define ELF_ARCH                EM_UNICORE32
443 
444 static inline void init_thread(struct target_pt_regs *regs,
445         struct image_info *infop)
446 {
447     abi_long stack = infop->start_stack;
448     memset(regs, 0, sizeof(*regs));
449     regs->UC32_REG_asr = 0x10;
450     regs->UC32_REG_pc = infop->entry & 0xfffffffe;
451     regs->UC32_REG_sp = infop->start_stack;
452     /* FIXME - what to do on failure of get_user()? */
453     get_user_ual(regs->UC32_REG_02, stack + 8); /* envp */
454     get_user_ual(regs->UC32_REG_01, stack + 4); /* argv */
455     /* XXX: it seems that r0 is zeroed afterwards anyway */
456     regs->UC32_REG_00 = 0;
457 }
458 
459 #define ELF_NREG    34
460 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
461 
462 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUUniCore32State *env)
463 {
464     (*regs)[0] = env->regs[0];
465     (*regs)[1] = env->regs[1];
466     (*regs)[2] = env->regs[2];
467     (*regs)[3] = env->regs[3];
468     (*regs)[4] = env->regs[4];
469     (*regs)[5] = env->regs[5];
470     (*regs)[6] = env->regs[6];
471     (*regs)[7] = env->regs[7];
472     (*regs)[8] = env->regs[8];
473     (*regs)[9] = env->regs[9];
474     (*regs)[10] = env->regs[10];
475     (*regs)[11] = env->regs[11];
476     (*regs)[12] = env->regs[12];
477     (*regs)[13] = env->regs[13];
478     (*regs)[14] = env->regs[14];
479     (*regs)[15] = env->regs[15];
480     (*regs)[16] = env->regs[16];
481     (*regs)[17] = env->regs[17];
482     (*regs)[18] = env->regs[18];
483     (*regs)[19] = env->regs[19];
484     (*regs)[20] = env->regs[20];
485     (*regs)[21] = env->regs[21];
486     (*regs)[22] = env->regs[22];
487     (*regs)[23] = env->regs[23];
488     (*regs)[24] = env->regs[24];
489     (*regs)[25] = env->regs[25];
490     (*regs)[26] = env->regs[26];
491     (*regs)[27] = env->regs[27];
492     (*regs)[28] = env->regs[28];
493     (*regs)[29] = env->regs[29];
494     (*regs)[30] = env->regs[30];
495     (*regs)[31] = env->regs[31];
496 
497     (*regs)[32] = cpu_asr_read((CPUUniCore32State *)env);
498     (*regs)[33] = env->regs[0]; /* XXX */
499 }
500 
501 #define USE_ELF_CORE_DUMP
502 #define ELF_EXEC_PAGESIZE               4096
503 
504 #define ELF_HWCAP                       (UC32_HWCAP_CMOV | UC32_HWCAP_UCF64)
505 
506 #endif
507 
508 #ifdef TARGET_SPARC
509 #ifdef TARGET_SPARC64
510 
511 #define ELF_START_MMAP 0x80000000
512 #define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
513                     | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
514 #ifndef TARGET_ABI32
515 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
516 #else
517 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
518 #endif
519 
520 #define ELF_CLASS   ELFCLASS64
521 #define ELF_ARCH    EM_SPARCV9
522 
523 #define STACK_BIAS              2047
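/* The 64-bit SPARC V9 ABI keeps %sp biased by 2047 bytes; init_thread()
   below subtracts this bias along with the 16-register window save area. */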
524 
525 static inline void init_thread(struct target_pt_regs *regs,
526                                struct image_info *infop)
527 {
528 #ifndef TARGET_ABI32
529     regs->tstate = 0;
530 #endif
531     regs->pc = infop->entry;
532     regs->npc = regs->pc + 4;
533     regs->y = 0;
534 #ifdef TARGET_ABI32
535     regs->u_regs[14] = infop->start_stack - 16 * 4;
536 #else
537     if (personality(infop->personality) == PER_LINUX32)
538         regs->u_regs[14] = infop->start_stack - 16 * 4;
539     else
540         regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
541 #endif
542 }
543 
544 #else
545 #define ELF_START_MMAP 0x80000000
546 #define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
547                     | HWCAP_SPARC_MULDIV)
548 #define elf_check_arch(x) ( (x) == EM_SPARC )
549 
550 #define ELF_CLASS   ELFCLASS32
551 #define ELF_ARCH    EM_SPARC
552 
553 static inline void init_thread(struct target_pt_regs *regs,
554                                struct image_info *infop)
555 {
556     regs->psr = 0;
557     regs->pc = infop->entry;
558     regs->npc = regs->pc + 4;
559     regs->y = 0;
560     regs->u_regs[14] = infop->start_stack - 16 * 4;
561 }
562 
563 #endif
564 #endif
565 
566 #ifdef TARGET_PPC
567 
568 #define ELF_START_MMAP 0x80000000
569 
570 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
571 
572 #define elf_check_arch(x) ( (x) == EM_PPC64 )
573 
574 #define ELF_CLASS       ELFCLASS64
575 
576 #else
577 
578 #define elf_check_arch(x) ( (x) == EM_PPC )
579 
580 #define ELF_CLASS       ELFCLASS32
581 
582 #endif
583 
584 #define ELF_ARCH        EM_PPC
585 
586 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
587    See arch/powerpc/include/asm/cputable.h.  */
588 enum {
589     QEMU_PPC_FEATURE_32 = 0x80000000,
590     QEMU_PPC_FEATURE_64 = 0x40000000,
591     QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
592     QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
593     QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
594     QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
595     QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
596     QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
597     QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
598     QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
599     QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
600     QEMU_PPC_FEATURE_NO_TB = 0x00100000,
601     QEMU_PPC_FEATURE_POWER4 = 0x00080000,
602     QEMU_PPC_FEATURE_POWER5 = 0x00040000,
603     QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
604     QEMU_PPC_FEATURE_CELL = 0x00010000,
605     QEMU_PPC_FEATURE_BOOKE = 0x00008000,
606     QEMU_PPC_FEATURE_SMT = 0x00004000,
607     QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
608     QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
609     QEMU_PPC_FEATURE_PA6T = 0x00000800,
610     QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
611     QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
612     QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
613     QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
614     QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
615 
616     QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
617     QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
618 };
619 
620 #define ELF_HWCAP get_elf_hwcap()
621 
622 static uint32_t get_elf_hwcap(void)
623 {
624     PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
625     uint32_t features = 0;
626 
627     /* We don't have to be terribly complete here; the high points are
628        Altivec/FP/SPE support.  Anything else is just a bonus.  */
629 #define GET_FEATURE(flag, feature)                                      \
630     do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
631     GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
632     GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
633     GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
634     GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
635     GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
636     GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
637     GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
638     GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
639 #undef GET_FEATURE
640 
641     return features;
642 }
643 
644 /*
645  * The requirements here are:
646  * - keep the final alignment of sp (sp & 0xf)
647  * - make sure the 32-bit value at the first 16 byte aligned position of
648  *   AUXV is greater than 16 for glibc compatibility.
649  *   AT_IGNOREPPC is used for that.
650  * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
651  *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
652  */
653 #define DLINFO_ARCH_ITEMS       5
654 #define ARCH_DLINFO                                     \
655     do {                                                \
656         NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
657         NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
658         NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
659         /*                                              \
660          * Now handle glibc compatibility.              \
661          */                                             \
662         NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
663         NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
664     } while (0)
665 
666 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
667 {
668     _regs->gpr[1] = infop->start_stack;
669 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
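    /* On 64-bit PPC with the ELFv1 ABI, e_entry points at a function
       descriptor rather than code: the first doubleword is the real entry
       address and the second is the TOC pointer for r2, both of which need
       the load bias applied. */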
670     _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_bias;
671     infop->entry = ldq_raw(infop->entry) + infop->load_bias;
672 #endif
673     _regs->nip = infop->entry;
674 }
675 
676 /* See linux kernel: arch/powerpc/include/asm/elf.h.  */
677 #define ELF_NREG 48
678 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
679 
680 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
681 {
682     int i;
683     target_ulong ccr = 0;
684 
685     for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
686         (*regs)[i] = tswapreg(env->gpr[i]);
687     }
688 
689     (*regs)[32] = tswapreg(env->nip);
690     (*regs)[33] = tswapreg(env->msr);
691     (*regs)[35] = tswapreg(env->ctr);
692     (*regs)[36] = tswapreg(env->lr);
693     (*regs)[37] = tswapreg(env->xer);
694 
695     for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
696         ccr |= env->crf[i] << (32 - ((i + 1) * 4));
697     }
698     (*regs)[38] = tswapreg(ccr);
699 }
700 
701 #define USE_ELF_CORE_DUMP
702 #define ELF_EXEC_PAGESIZE       4096
703 
704 #endif
705 
706 #ifdef TARGET_MIPS
707 
708 #define ELF_START_MMAP 0x80000000
709 
710 #define elf_check_arch(x) ( (x) == EM_MIPS )
711 
712 #ifdef TARGET_MIPS64
713 #define ELF_CLASS   ELFCLASS64
714 #else
715 #define ELF_CLASS   ELFCLASS32
716 #endif
717 #define ELF_ARCH    EM_MIPS
718 
719 static inline void init_thread(struct target_pt_regs *regs,
720                                struct image_info *infop)
721 {
722     regs->cp0_status = 2 << CP0St_KSU;
723     regs->cp0_epc = infop->entry;
724     regs->regs[29] = infop->start_stack;
725 }
726 
727 /* See linux kernel: arch/mips/include/asm/elf.h.  */
728 #define ELF_NREG 45
729 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
730 
731 /* See linux kernel: arch/mips/include/asm/reg.h.  */
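/* The kernel's 32-bit elf_gregset_t layout reserves six leading pad slots,
   which is presumably why TARGET_EF_R0 is 6 there and 0 for the 64-bit
   layout. */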
732 enum {
733 #ifdef TARGET_MIPS64
734     TARGET_EF_R0 = 0,
735 #else
736     TARGET_EF_R0 = 6,
737 #endif
738     TARGET_EF_R26 = TARGET_EF_R0 + 26,
739     TARGET_EF_R27 = TARGET_EF_R0 + 27,
740     TARGET_EF_LO = TARGET_EF_R0 + 32,
741     TARGET_EF_HI = TARGET_EF_R0 + 33,
742     TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
743     TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
744     TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
745     TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
746 };
747 
748 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
749 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
750 {
751     int i;
752 
753     for (i = 0; i < TARGET_EF_R0; i++) {
754         (*regs)[i] = 0;
755     }
756     (*regs)[TARGET_EF_R0] = 0;
757 
758     for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
759         (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
760     }
761 
762     (*regs)[TARGET_EF_R26] = 0;
763     (*regs)[TARGET_EF_R27] = 0;
764     (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
765     (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
766     (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
767     (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
768     (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
769     (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
770 }
771 
772 #define USE_ELF_CORE_DUMP
773 #define ELF_EXEC_PAGESIZE        4096
774 
775 #endif /* TARGET_MIPS */
776 
777 #ifdef TARGET_MICROBLAZE
778 
779 #define ELF_START_MMAP 0x80000000
780 
781 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
782 
783 #define ELF_CLASS   ELFCLASS32
784 #define ELF_ARCH    EM_MICROBLAZE
785 
786 static inline void init_thread(struct target_pt_regs *regs,
787                                struct image_info *infop)
788 {
789     regs->pc = infop->entry;
790     regs->r1 = infop->start_stack;
791 
792 }
793 
794 #define ELF_EXEC_PAGESIZE        4096
795 
796 #define USE_ELF_CORE_DUMP
797 #define ELF_NREG 38
798 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
799 
800 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
801 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
802 {
803     int i, pos = 0;
804 
805     for (i = 0; i < 32; i++) {
806         (*regs)[pos++] = tswapreg(env->regs[i]);
807     }
808 
809     for (i = 0; i < 6; i++) {
810         (*regs)[pos++] = tswapreg(env->sregs[i]);
811     }
812 }
813 
814 #endif /* TARGET_MICROBLAZE */
815 
816 #ifdef TARGET_OPENRISC
817 
818 #define ELF_START_MMAP 0x08000000
819 
820 #define elf_check_arch(x) ((x) == EM_OPENRISC)
821 
822 #define ELF_ARCH EM_OPENRISC
823 #define ELF_CLASS ELFCLASS32
824 #define ELF_DATA  ELFDATA2MSB
825 
826 static inline void init_thread(struct target_pt_regs *regs,
827                                struct image_info *infop)
828 {
829     regs->pc = infop->entry;
830     regs->gpr[1] = infop->start_stack;
831 }
832 
833 #define USE_ELF_CORE_DUMP
834 #define ELF_EXEC_PAGESIZE 8192
835 
836 /* See linux kernel arch/openrisc/include/asm/elf.h.  */
837 #define ELF_NREG 34 /* gprs and pc, sr */
838 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
839 
840 static void elf_core_copy_regs(target_elf_gregset_t *regs,
841                                const CPUOpenRISCState *env)
842 {
843     int i;
844 
845     for (i = 0; i < 32; i++) {
846         (*regs)[i] = tswapreg(env->gpr[i]);
847     }
848 
849     (*regs)[32] = tswapreg(env->pc);
850     (*regs)[33] = tswapreg(env->sr);
851 }
852 #define ELF_HWCAP 0
853 #define ELF_PLATFORM NULL
854 
855 #endif /* TARGET_OPENRISC */
856 
857 #ifdef TARGET_SH4
858 
859 #define ELF_START_MMAP 0x80000000
860 
861 #define elf_check_arch(x) ( (x) == EM_SH )
862 
863 #define ELF_CLASS ELFCLASS32
864 #define ELF_ARCH  EM_SH
865 
866 static inline void init_thread(struct target_pt_regs *regs,
867                                struct image_info *infop)
868 {
869     /* Check other registers XXXXX */
870     regs->pc = infop->entry;
871     regs->regs[15] = infop->start_stack;
872 }
873 
874 /* See linux kernel: arch/sh/include/asm/elf.h.  */
875 #define ELF_NREG 23
876 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
877 
878 /* See linux kernel: arch/sh/include/asm/ptrace.h.  */
879 enum {
880     TARGET_REG_PC = 16,
881     TARGET_REG_PR = 17,
882     TARGET_REG_SR = 18,
883     TARGET_REG_GBR = 19,
884     TARGET_REG_MACH = 20,
885     TARGET_REG_MACL = 21,
886     TARGET_REG_SYSCALL = 22
887 };
888 
889 static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
890                                       const CPUSH4State *env)
891 {
892     int i;
893 
894     for (i = 0; i < 16; i++) {
895         (*regs)[i] = tswapreg(env->gregs[i]);
896     }
897 
898     (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
899     (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
900     (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
901     (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
902     (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
903     (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
904     (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
905 }
906 
907 #define USE_ELF_CORE_DUMP
908 #define ELF_EXEC_PAGESIZE        4096
909 
910 #endif
911 
912 #ifdef TARGET_CRIS
913 
914 #define ELF_START_MMAP 0x80000000
915 
916 #define elf_check_arch(x) ( (x) == EM_CRIS )
917 
918 #define ELF_CLASS ELFCLASS32
919 #define ELF_ARCH  EM_CRIS
920 
921 static inline void init_thread(struct target_pt_regs *regs,
922                                struct image_info *infop)
923 {
924     regs->erp = infop->entry;
925 }
926 
927 #define ELF_EXEC_PAGESIZE        8192
928 
929 #endif
930 
931 #ifdef TARGET_M68K
932 
933 #define ELF_START_MMAP 0x80000000
934 
935 #define elf_check_arch(x) ( (x) == EM_68K )
936 
937 #define ELF_CLASS       ELFCLASS32
938 #define ELF_ARCH        EM_68K
939 
940 /* ??? Does this need to do anything?
941    #define ELF_PLAT_INIT(_r) */
942 
943 static inline void init_thread(struct target_pt_regs *regs,
944                                struct image_info *infop)
945 {
946     regs->usp = infop->start_stack;
947     regs->sr = 0;
948     regs->pc = infop->entry;
949 }
950 
951 /* See linux kernel: arch/m68k/include/asm/elf.h.  */
952 #define ELF_NREG 20
953 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
954 
955 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
956 {
957     (*regs)[0] = tswapreg(env->dregs[1]);
958     (*regs)[1] = tswapreg(env->dregs[2]);
959     (*regs)[2] = tswapreg(env->dregs[3]);
960     (*regs)[3] = tswapreg(env->dregs[4]);
961     (*regs)[4] = tswapreg(env->dregs[5]);
962     (*regs)[5] = tswapreg(env->dregs[6]);
963     (*regs)[6] = tswapreg(env->dregs[7]);
964     (*regs)[7] = tswapreg(env->aregs[0]);
965     (*regs)[8] = tswapreg(env->aregs[1]);
966     (*regs)[9] = tswapreg(env->aregs[2]);
967     (*regs)[10] = tswapreg(env->aregs[3]);
968     (*regs)[11] = tswapreg(env->aregs[4]);
969     (*regs)[12] = tswapreg(env->aregs[5]);
970     (*regs)[13] = tswapreg(env->aregs[6]);
971     (*regs)[14] = tswapreg(env->dregs[0]);
972     (*regs)[15] = tswapreg(env->aregs[7]);
973     (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
974     (*regs)[17] = tswapreg(env->sr);
975     (*regs)[18] = tswapreg(env->pc);
976     (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
977 }
978 
979 #define USE_ELF_CORE_DUMP
980 #define ELF_EXEC_PAGESIZE       8192
981 
982 #endif
983 
984 #ifdef TARGET_ALPHA
985 
986 #define ELF_START_MMAP (0x30000000000ULL)
987 
988 #define elf_check_arch(x) ( (x) == ELF_ARCH )
989 
990 #define ELF_CLASS      ELFCLASS64
991 #define ELF_ARCH       EM_ALPHA
992 
993 static inline void init_thread(struct target_pt_regs *regs,
994                                struct image_info *infop)
995 {
996     regs->pc = infop->entry;
997     regs->ps = 8;
998     regs->usp = infop->start_stack;
999 }
1000 
1001 #define ELF_EXEC_PAGESIZE        8192
1002 
1003 #endif /* TARGET_ALPHA */
1004 
1005 #ifdef TARGET_S390X
1006 
1007 #define ELF_START_MMAP (0x20000000000ULL)
1008 
1009 #define elf_check_arch(x) ( (x) == ELF_ARCH )
1010 
1011 #define ELF_CLASS	ELFCLASS64
1012 #define ELF_DATA	ELFDATA2MSB
1013 #define ELF_ARCH	EM_S390
1014 
1015 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
1016 {
1017     regs->psw.addr = infop->entry;
1018     regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
1019     regs->gprs[15] = infop->start_stack;
1020 }
1021 
1022 #endif /* TARGET_S390X */
1023 
1024 #ifndef ELF_PLATFORM
1025 #define ELF_PLATFORM (NULL)
1026 #endif
1027 
1028 #ifndef ELF_HWCAP
1029 #define ELF_HWCAP 0
1030 #endif
1031 
1032 #ifdef TARGET_ABI32
1033 #undef ELF_CLASS
1034 #define ELF_CLASS ELFCLASS32
1035 #undef bswaptls
1036 #define bswaptls(ptr) bswap32s(ptr)
1037 #endif
1038 
1039 #include "elf.h"
1040 
1041 struct exec
1042 {
1043     unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
1044     unsigned int a_text;   /* length of text, in bytes */
1045     unsigned int a_data;   /* length of data, in bytes */
1046     unsigned int a_bss;    /* length of uninitialized data area, in bytes */
1047     unsigned int a_syms;   /* length of symbol table data in file, in bytes */
1048     unsigned int a_entry;  /* start address */
1049     unsigned int a_trsize; /* length of relocation info for text, in bytes */
1050     unsigned int a_drsize; /* length of relocation info for data, in bytes */
1051 };
1052 
1053 
1054 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
1055 #define OMAGIC 0407
1056 #define NMAGIC 0410
1057 #define ZMAGIC 0413
1058 #define QMAGIC 0314
1059 
1060 /* Necessary parameters */
1061 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
1062 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
1063 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
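/* For example, with a 4 KiB TARGET_PAGE_SIZE, TARGET_ELF_PAGESTART(0x12345)
   is 0x12000 and TARGET_ELF_PAGEOFFSET(0x12345) is 0x345. */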
1064 
1065 #define DLINFO_ITEMS 14
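/* This must equal the number of NEW_AUX_ENT() calls in create_elf_tables()
   that follow the "exactly DLINFO_ITEMS entries" marker (AT_PHDR through
   AT_RANDOM); the platform and arch-specific entries are counted
   separately. */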
1066 
1067 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
1068 {
1069     memcpy(to, from, n);
1070 }
1071 
1072 #ifdef BSWAP_NEEDED
1073 static void bswap_ehdr(struct elfhdr *ehdr)
1074 {
1075     bswap16s(&ehdr->e_type);            /* Object file type */
1076     bswap16s(&ehdr->e_machine);         /* Architecture */
1077     bswap32s(&ehdr->e_version);         /* Object file version */
1078     bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
1079     bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
1080     bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
1081     bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
1082     bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
1083     bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
1084     bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
1085     bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
1086     bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
1087     bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
1088 }
1089 
1090 static void bswap_phdr(struct elf_phdr *phdr, int phnum)
1091 {
1092     int i;
1093     for (i = 0; i < phnum; ++i, ++phdr) {
1094         bswap32s(&phdr->p_type);        /* Segment type */
1095         bswap32s(&phdr->p_flags);       /* Segment flags */
1096         bswaptls(&phdr->p_offset);      /* Segment file offset */
1097         bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
1098         bswaptls(&phdr->p_paddr);       /* Segment physical address */
1099         bswaptls(&phdr->p_filesz);      /* Segment size in file */
1100         bswaptls(&phdr->p_memsz);       /* Segment size in memory */
1101         bswaptls(&phdr->p_align);       /* Segment alignment */
1102     }
1103 }
1104 
1105 static void bswap_shdr(struct elf_shdr *shdr, int shnum)
1106 {
1107     int i;
1108     for (i = 0; i < shnum; ++i, ++shdr) {
1109         bswap32s(&shdr->sh_name);
1110         bswap32s(&shdr->sh_type);
1111         bswaptls(&shdr->sh_flags);
1112         bswaptls(&shdr->sh_addr);
1113         bswaptls(&shdr->sh_offset);
1114         bswaptls(&shdr->sh_size);
1115         bswap32s(&shdr->sh_link);
1116         bswap32s(&shdr->sh_info);
1117         bswaptls(&shdr->sh_addralign);
1118         bswaptls(&shdr->sh_entsize);
1119     }
1120 }
1121 
1122 static void bswap_sym(struct elf_sym *sym)
1123 {
1124     bswap32s(&sym->st_name);
1125     bswaptls(&sym->st_value);
1126     bswaptls(&sym->st_size);
1127     bswap16s(&sym->st_shndx);
1128 }
1129 #else
1130 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
1131 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
1132 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
1133 static inline void bswap_sym(struct elf_sym *sym) { }
1134 #endif
1135 
1136 #ifdef USE_ELF_CORE_DUMP
1137 static int elf_core_dump(int, const CPUArchState *);
1138 #endif /* USE_ELF_CORE_DUMP */
1139 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
1140 
1141 /* Verify the portions of EHDR within E_IDENT for the target.
1142    This can be performed before bswapping the entire header.  */
1143 static bool elf_check_ident(struct elfhdr *ehdr)
1144 {
1145     return (ehdr->e_ident[EI_MAG0] == ELFMAG0
1146             && ehdr->e_ident[EI_MAG1] == ELFMAG1
1147             && ehdr->e_ident[EI_MAG2] == ELFMAG2
1148             && ehdr->e_ident[EI_MAG3] == ELFMAG3
1149             && ehdr->e_ident[EI_CLASS] == ELF_CLASS
1150             && ehdr->e_ident[EI_DATA] == ELF_DATA
1151             && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
1152 }
1153 
1154 /* Verify the portions of EHDR outside of E_IDENT for the target.
1155    This has to wait until after bswapping the header.  */
1156 static bool elf_check_ehdr(struct elfhdr *ehdr)
1157 {
1158     return (elf_check_arch(ehdr->e_machine)
1159             && ehdr->e_ehsize == sizeof(struct elfhdr)
1160             && ehdr->e_phentsize == sizeof(struct elf_phdr)
1161             && ehdr->e_shentsize == sizeof(struct elf_shdr)
1162             && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
1163 }
1164 
1165 /*
1166  * 'copy_elf_strings()' copies argument/envelope strings from user
1167  * memory to free pages in kernel mem. These are in a format ready
1168  * to be put directly into the top of new user memory.
1169  *
1170  */
1171 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
1172                                   abi_ulong p)
1173 {
1174     char *tmp, *tmp1, *pag = NULL;
1175     int len, offset = 0;
1176 
1177     if (!p) {
1178         return 0;       /* bullet-proofing */
1179     }
1180     while (argc-- > 0) {
1181         tmp = argv[argc];
1182         if (!tmp) {
1183             fprintf(stderr, "VFS: argc is wrong");
1184             exit(-1);
1185         }
1186         tmp1 = tmp;
1187         while (*tmp++);
1188         len = tmp - tmp1;
1189         if (p < len) {  /* this shouldn't happen - 128kB */
1190             return 0;
1191         }
1192         while (len) {
1193             --p; --tmp; --len;
1194             if (--offset < 0) {
1195                 offset = p % TARGET_PAGE_SIZE;
1196                 pag = (char *)page[p/TARGET_PAGE_SIZE];
1197                 if (!pag) {
1198                     pag = g_try_malloc0(TARGET_PAGE_SIZE);
1199                     page[p/TARGET_PAGE_SIZE] = pag;
1200                     if (!pag)
1201                         return 0;
1202                 }
1203             }
1204             if (len == 0 || offset == 0) {
1205                 *(pag + offset) = *tmp;
1206             }
1207             else {
1208                 int bytes_to_copy = (len > offset) ? offset : len;
1209                 tmp -= bytes_to_copy;
1210                 p -= bytes_to_copy;
1211                 offset -= bytes_to_copy;
1212                 len -= bytes_to_copy;
1213                 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
1214             }
1215         }
1216     }
1217     return p;
1218 }
1219 
1220 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
1221                                  struct image_info *info)
1222 {
1223     abi_ulong stack_base, size, error, guard;
1224     int i;
1225 
1226     /* Create enough stack to hold everything.  If we don't use
1227        it for args, we'll use it for something else.  */
1228     size = guest_stack_size;
1229     if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) {
1230         size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1231     }
1232     guard = TARGET_PAGE_SIZE;
1233     if (guard < qemu_real_host_page_size) {
1234         guard = qemu_real_host_page_size;
1235     }
1236 
1237     error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
1238                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1239     if (error == -1) {
1240         perror("mmap stack");
1241         exit(-1);
1242     }
1243 
1244     /* We reserve one extra page at the top of the stack as guard.  */
1245     target_mprotect(error, guard, PROT_NONE);
1246 
1247     info->stack_limit = error + guard;
1248     stack_base = info->stack_limit + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1249     p += stack_base;
1250 
1251     for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
1252         if (bprm->page[i]) {
1253             info->rss++;
1254             /* FIXME - check return value of memcpy_to_target() for failure */
1255             memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
1256             g_free(bprm->page[i]);
1257         }
1258         stack_base += TARGET_PAGE_SIZE;
1259     }
1260     return p;
1261 }
1262 
1263 /* Map and zero the bss.  We need to explicitly zero any fractional pages
1264    after the data section (i.e. bss).  */
1265 static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
1266 {
1267     uintptr_t host_start, host_map_start, host_end;
1268 
1269     last_bss = TARGET_PAGE_ALIGN(last_bss);
1270 
1271     /* ??? There is confusion between qemu_real_host_page_size and
1272        qemu_host_page_size here and elsewhere in target_mmap, which
1273        may lead to the end of the data section mapping from the file
1274        not being mapped.  At least there was an explicit test and
1275        comment for that here, suggesting that "the file size must
1276        be known".  The comment probably pre-dates the introduction
1277        of the fstat system call in target_mmap which does in fact
1278        find out the size.  What isn't clear is if the workaround
1279        here is still actually needed.  For now, continue with it,
1280        but merge it with the "normal" mmap that would allocate the bss.  */
1281 
1282     host_start = (uintptr_t) g2h(elf_bss);
1283     host_end = (uintptr_t) g2h(last_bss);
1284     host_map_start = (host_start + qemu_real_host_page_size - 1);
1285     host_map_start &= -qemu_real_host_page_size;
1286 
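    /* Pages from host_map_start upwards are mapped fresh as anonymous
       memory below; the tail of the last file-backed page, from host_start
       to host_map_start, only needs to be zeroed by hand, which the final
       memset takes care of. */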
1287     if (host_map_start < host_end) {
1288         void *p = mmap((void *)host_map_start, host_end - host_map_start,
1289                        prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1290         if (p == MAP_FAILED) {
1291             perror("cannot mmap brk");
1292             exit(-1);
1293         }
1294 
1295         /* Since we didn't use target_mmap, make sure to record
1296            the validity of the pages with qemu.  */
1297         page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID);
1298     }
1299 
1300     if (host_start < host_map_start) {
1301         memset((void *)host_start, 0, host_map_start - host_start);
1302     }
1303 }
1304 
1305 #ifdef CONFIG_USE_FDPIC
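/* Push an elf32_fdpic_loadmap onto the guest stack.  The per-segment
   {addr, p_vaddr, p_memsz} triples are written from high to low addresses
   so that the finished map reads forwards as version, nsegs, then the
   segment array; its address is recorded in info->loadmap_addr. */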
1306 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
1307 {
1308     uint16_t n;
1309     struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
1310 
1311     /* elf32_fdpic_loadseg */
1312     n = info->nsegs;
1313     while (n--) {
1314         sp -= 12;
1315         put_user_u32(loadsegs[n].addr, sp+0);
1316         put_user_u32(loadsegs[n].p_vaddr, sp+4);
1317         put_user_u32(loadsegs[n].p_memsz, sp+8);
1318     }
1319 
1320     /* elf32_fdpic_loadmap */
1321     sp -= 4;
1322     put_user_u16(0, sp+0); /* version */
1323     put_user_u16(info->nsegs, sp+2); /* nsegs */
1324 
1325     info->personality = PER_LINUX_FDPIC;
1326     info->loadmap_addr = sp;
1327 
1328     return sp;
1329 }
1330 #endif
1331 
1332 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1333                                    struct elfhdr *exec,
1334                                    struct image_info *info,
1335                                    struct image_info *interp_info)
1336 {
1337     abi_ulong sp;
1338     abi_ulong sp_auxv;
1339     int size;
1340     int i;
1341     abi_ulong u_rand_bytes;
1342     uint8_t k_rand_bytes[16];
1343     abi_ulong u_platform;
1344     const char *k_platform;
1345     const int n = sizeof(elf_addr_t);
1346 
1347     sp = p;
1348 
1349 #ifdef CONFIG_USE_FDPIC
1350     /* Needs to be before we load the env/argc/... */
1351     if (elf_is_fdpic(exec)) {
1352         /* Need 4 byte alignment for these structs */
1353         sp &= ~3;
1354         sp = loader_build_fdpic_loadmap(info, sp);
1355         info->other_info = interp_info;
1356         if (interp_info) {
1357             interp_info->other_info = info;
1358             sp = loader_build_fdpic_loadmap(interp_info, sp);
1359         }
1360     }
1361 #endif
1362 
1363     u_platform = 0;
1364     k_platform = ELF_PLATFORM;
1365     if (k_platform) {
1366         size_t len = strlen(k_platform) + 1;
1367         sp -= (len + n - 1) & ~(n - 1);
1368         u_platform = sp;
1369         /* FIXME - check return value of memcpy_to_target() for failure */
1370         memcpy_to_target(sp, k_platform, len);
1371     }
1372 
1373     /*
1374      * Generate 16 random bytes for userspace PRNG seeding (not
1375      * cryptographically secure, but that is not the aim of QEMU).
1376      */
1377     srand((unsigned int) time(NULL));
1378     for (i = 0; i < 16; i++) {
1379         k_rand_bytes[i] = rand();
1380     }
1381     sp -= 16;
1382     u_rand_bytes = sp;
1383     /* FIXME - check return value of memcpy_to_target() for failure */
1384     memcpy_to_target(sp, k_rand_bytes, 16);
1385 
1386     /*
1387      * Force 16 byte _final_ alignment here for generality.
1388      */
1389     sp = sp &~ (abi_ulong)15;
1390     size = (DLINFO_ITEMS + 1) * 2;
1391     if (k_platform)
1392         size += 2;
1393 #ifdef DLINFO_ARCH_ITEMS
1394     size += DLINFO_ARCH_ITEMS * 2;
1395 #endif
1396     size += envc + argc + 2;
1397     size += 1;  /* argc itself */
1398     size *= n;
1399     if (size & 15)
1400         sp -= 16 - (size & 15);
1401 
1402     /* This is correct because Linux defines
1403      * elf_addr_t as Elf32_Off / Elf64_Off
1404      */
1405 #define NEW_AUX_ENT(id, val) do {               \
1406         sp -= n; put_user_ual(val, sp);         \
1407         sp -= n; put_user_ual(id, sp);          \
1408     } while(0)
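
    /* Entries are pushed downwards in memory, so after all the
       NEW_AUX_ENT() calls below the auxiliary vector reads forwards as
       (id, val) pairs in the reverse order of the calls, terminated by the
       AT_NULL entry pushed first. */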
1409 
1410     sp_auxv = sp;
1411     NEW_AUX_ENT (AT_NULL, 0);
1412 
1413     /* There must be exactly DLINFO_ITEMS entries here.  */
1414     NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
1415     NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1416     NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1417     NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1418     NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
1419     NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1420     NEW_AUX_ENT(AT_ENTRY, info->entry);
1421     NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1422     NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1423     NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1424     NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1425     NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1426     NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1427     NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
1428 
1429     if (k_platform)
1430         NEW_AUX_ENT(AT_PLATFORM, u_platform);
1431 #ifdef ARCH_DLINFO
1432     /*
1433      * ARCH_DLINFO must come last so platform specific code can enforce
1434      * special alignment requirements on the AUXV if necessary (eg. PPC).
1435      */
1436     ARCH_DLINFO;
1437 #endif
1438 #undef NEW_AUX_ENT
1439 
1440     info->saved_auxv = sp;
1441     info->auxv_len = sp_auxv - sp;
1442 
1443     sp = loader_build_argptr(envc, argc, sp, p, 0);
1444     return sp;
1445 }
1446 
1447 #ifndef TARGET_HAS_VALIDATE_GUEST_SPACE
1448 /* If the guest doesn't have a validation function just agree */
1449 static int validate_guest_space(unsigned long guest_base,
1450                                 unsigned long guest_size)
1451 {
1452     return 1;
1453 }
1454 #endif
1455 
1456 unsigned long init_guest_space(unsigned long host_start,
1457                                unsigned long host_size,
1458                                unsigned long guest_start,
1459                                bool fixed)
1460 {
1461     unsigned long current_start, real_start;
1462     int flags;
1463 
1464     assert(host_start || host_size);
1465 
1466     /* If just a starting address is given, then just verify that
1467      * address.  */
1468     if (host_start && !host_size) {
1469         if (validate_guest_space(host_start, host_size) == 1) {
1470             return host_start;
1471         } else {
1472             return (unsigned long)-1;
1473         }
1474     }
1475 
1476     /* Setup the initial flags and start address.  */
1477     current_start = host_start & qemu_host_page_mask;
1478     flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
1479     if (fixed) {
1480         flags |= MAP_FIXED;
1481     }
1482 
1483     /* Otherwise, a non-zero size region of memory needs to be mapped
1484      * and validated.  */
1485     while (1) {
1486         unsigned long real_size = host_size;
1487 
1488         /* Do not use mmap_find_vma here because that is limited to the
1489          * guest address space.  We are going to make the
1490          * guest address space fit whatever we're given.
1491          */
1492         real_start = (unsigned long)
1493             mmap((void *)current_start, host_size, PROT_NONE, flags, -1, 0);
1494         if (real_start == (unsigned long)-1) {
1495             return (unsigned long)-1;
1496         }
1497 
1498         /* Ensure the address is properly aligned.  */
1499         if (real_start & ~qemu_host_page_mask) {
1500             munmap((void *)real_start, host_size);
1501             real_size = host_size + qemu_host_page_size;
1502             real_start = (unsigned long)
1503                 mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0);
1504             if (real_start == (unsigned long)-1) {
1505                 return (unsigned long)-1;
1506             }
1507             real_start = HOST_PAGE_ALIGN(real_start);
1508         }
1509 
1510         /* Check to see if the address is valid.  */
1511         if (!host_start || real_start == current_start) {
1512             int valid = validate_guest_space(real_start - guest_start,
1513                                              real_size);
1514             if (valid == 1) {
1515                 break;
1516             } else if (valid == -1) {
1517                 return (unsigned long)-1;
1518             }
1519             /* valid == 0, so try again. */
1520         }
1521 
1522         /* That address didn't work.  Unmap and try a different one.
1523          * The address the host picked is typically right at the top
1524          * of the host address space, which leaves the guest with
1525          * no usable address space.  Resort to a linear search.  We
1526          * already compensated for mmap_min_addr, so this should not
1527          * happen often.  Probably means we got unlucky and host
1528          * address space randomization put a shared library somewhere
1529          * inconvenient.
1530          */
1531         munmap((void *)real_start, host_size);
1532         current_start += qemu_host_page_size;
1533         if (host_start == current_start) {
1534             /* Theoretically possible if host doesn't have any suitably
1535              * aligned areas.  Normally the first mmap will fail.
1536              */
1537             return (unsigned long)-1;
1538         }
1539     }
1540 
1541     qemu_log("Reserved 0x%lx bytes of guest address space\n", host_size);
1542 
1543     return real_start;
1544 }
1545 
1546 static void probe_guest_base(const char *image_name,
1547                              abi_ulong loaddr, abi_ulong hiaddr)
1548 {
1549     /* Probe for a suitable guest base address, if the user has not set
1550      * it explicitly, and set guest_base appropriately.
1551      * In case of error we will print a suitable message and exit.
1552      */
1553 #if defined(CONFIG_USE_GUEST_BASE)
1554     const char *errmsg;
1555     if (!have_guest_base && !reserved_va) {
1556         unsigned long host_start, real_start, host_size;
1557 
1558         /* Round addresses to page boundaries.  */
1559         loaddr &= qemu_host_page_mask;
1560         hiaddr = HOST_PAGE_ALIGN(hiaddr);
1561 
1562         if (loaddr < mmap_min_addr) {
1563             host_start = HOST_PAGE_ALIGN(mmap_min_addr);
1564         } else {
1565             host_start = loaddr;
1566             if (host_start != loaddr) {
1567                 errmsg = "Address overflow loading ELF binary";
1568                 goto exit_errmsg;
1569             }
1570         }
1571         host_size = hiaddr - loaddr;
1572 
1573         /* Setup the initial guest memory space with ranges gleaned from
1574          * the ELF image that is being loaded.
1575          */
1576         real_start = init_guest_space(host_start, host_size, loaddr, false);
1577         if (real_start == (unsigned long)-1) {
1578             errmsg = "Unable to find space for application";
1579             goto exit_errmsg;
1580         }
1581         guest_base = real_start - loaddr;
1582 
1583         qemu_log("Relocating guest address space from 0x"
1584                  TARGET_ABI_FMT_lx " to 0x%lx\n",
1585                  loaddr, real_start);
1586     }
1587     return;
1588 
1589 exit_errmsg:
1590     fprintf(stderr, "%s: %s\n", image_name, errmsg);
1591     exit(-1);
1592 #endif
1593 }
1594 
1595 
1596 /* Load an ELF image into the address space.
1597 
1598    IMAGE_NAME is the filename of the image, to use in error messages.
1599    IMAGE_FD is the open file descriptor for the image.
1600 
1601    BPRM_BUF is a copy of the beginning of the file; this of course
1602    contains the elf file header at offset 0.  It is assumed that this
1603    buffer is sufficiently aligned to present no problems to the host
1604    in accessing data at aligned offsets within the buffer.
1605 
1606    On return: INFO values will be filled in, as necessary or available.  */
1607 
1608 static void load_elf_image(const char *image_name, int image_fd,
1609                            struct image_info *info, char **pinterp_name,
1610                            char bprm_buf[BPRM_BUF_SIZE])
1611 {
1612     struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
1613     struct elf_phdr *phdr;
1614     abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
1615     int i, retval;
1616     const char *errmsg;
1617 
1618     /* First of all, some simple consistency checks */
1619     errmsg = "Invalid ELF image for this architecture";
1620     if (!elf_check_ident(ehdr)) {
1621         goto exit_errmsg;
1622     }
1623     bswap_ehdr(ehdr);
1624     if (!elf_check_ehdr(ehdr)) {
1625         goto exit_errmsg;
1626     }
1627 
1628     i = ehdr->e_phnum * sizeof(struct elf_phdr);
1629     if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
1630         phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
1631     } else {
1632         phdr = (struct elf_phdr *) alloca(i);
1633         retval = pread(image_fd, phdr, i, ehdr->e_phoff);
1634         if (retval != i) {
1635             goto exit_read;
1636         }
1637     }
1638     bswap_phdr(phdr, ehdr->e_phnum);
1639 
1640 #ifdef CONFIG_USE_FDPIC
1641     info->nsegs = 0;
1642     info->pt_dynamic_addr = 0;
1643 #endif
1644 
1645     /* Find the maximum size of the image and allocate an appropriate
1646        amount of memory to handle that.  */
1647     loaddr = -1, hiaddr = 0;
1648     for (i = 0; i < ehdr->e_phnum; ++i) {
1649         if (phdr[i].p_type == PT_LOAD) {
1650             abi_ulong a = phdr[i].p_vaddr;
1651             if (a < loaddr) {
1652                 loaddr = a;
1653             }
1654             a += phdr[i].p_memsz;
1655             if (a > hiaddr) {
1656                 hiaddr = a;
1657             }
1658 #ifdef CONFIG_USE_FDPIC
1659             ++info->nsegs;
1660 #endif
1661         }
1662     }
1663 
1664     load_addr = loaddr;
1665     if (ehdr->e_type == ET_DYN) {
1666         /* The image indicates that it can be loaded anywhere.  Find a
1667            location that can hold the memory space required.  If the
1668            image is pre-linked, LOADDR will be non-zero.  Since we do
1669            not supply MAP_FIXED here we'll use that address if and
1670            only if it remains available.  */
1671         load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
1672                                 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
1673                                 -1, 0);
1674         if (load_addr == -1) {
1675             goto exit_perror;
1676         }
1677     } else if (pinterp_name != NULL) {
1678         /* This is the main executable.  Make sure that the low
1679            address does not conflict with MMAP_MIN_ADDR or the
1680            QEMU application itself.  */
1681         probe_guest_base(image_name, loaddr, hiaddr);
1682     }
1683     load_bias = load_addr - loaddr;
1684 
1685 #ifdef CONFIG_USE_FDPIC
1686     {
1687         struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
1688             g_malloc(sizeof(*loadsegs) * info->nsegs);
1689 
1690         for (i = 0; i < ehdr->e_phnum; ++i) {
1691             switch (phdr[i].p_type) {
1692             case PT_DYNAMIC:
1693                 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
1694                 break;
1695             case PT_LOAD:
1696                 loadsegs->addr = phdr[i].p_vaddr + load_bias;
1697                 loadsegs->p_vaddr = phdr[i].p_vaddr;
1698                 loadsegs->p_memsz = phdr[i].p_memsz;
1699                 ++loadsegs;
1700                 break;
1701             }
1702         }
1703     }
1704 #endif
1705 
1706     info->load_bias = load_bias;
1707     info->load_addr = load_addr;
1708     info->entry = ehdr->e_entry + load_bias;
1709     info->start_code = -1;
1710     info->end_code = 0;
1711     info->start_data = -1;
1712     info->end_data = 0;
1713     info->brk = 0;
1714     info->elf_flags = ehdr->e_flags;
1715 
1716     for (i = 0; i < ehdr->e_phnum; i++) {
1717         struct elf_phdr *eppnt = phdr + i;
1718         if (eppnt->p_type == PT_LOAD) {
1719             abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
1720             int elf_prot = 0;
1721 
1722             if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
1723             if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1724             if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1725 
1726             vaddr = load_bias + eppnt->p_vaddr;
1727             vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
1728             vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
1729 
1730             error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
1731                                 elf_prot, MAP_PRIVATE | MAP_FIXED,
1732                                 image_fd, eppnt->p_offset - vaddr_po);
1733             if (error == -1) {
1734                 goto exit_perror;
1735             }
1736 
1737             vaddr_ef = vaddr + eppnt->p_filesz;
1738             vaddr_em = vaddr + eppnt->p_memsz;
1739 
1740             /* If the load segment requests extra zeros (e.g. bss), map it.  */
1741             if (vaddr_ef < vaddr_em) {
1742                 zero_bss(vaddr_ef, vaddr_em, elf_prot);
1743             }
1744 
1745             /* Find the full program boundaries.  */
1746             if (elf_prot & PROT_EXEC) {
1747                 if (vaddr < info->start_code) {
1748                     info->start_code = vaddr;
1749                 }
1750                 if (vaddr_ef > info->end_code) {
1751                     info->end_code = vaddr_ef;
1752                 }
1753             }
1754             if (elf_prot & PROT_WRITE) {
1755                 if (vaddr < info->start_data) {
1756                     info->start_data = vaddr;
1757                 }
1758                 if (vaddr_ef > info->end_data) {
1759                     info->end_data = vaddr_ef;
1760                 }
1761                 if (vaddr_em > info->brk) {
1762                     info->brk = vaddr_em;
1763                 }
1764             }
1765         } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
1766             char *interp_name;
1767 
1768             if (*pinterp_name) {
1769                 errmsg = "Multiple PT_INTERP entries";
1770                 goto exit_errmsg;
1771             }
1772             interp_name = malloc(eppnt->p_filesz);
1773             if (!interp_name) {
1774                 goto exit_perror;
1775             }
1776 
1777             if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
1778                 memcpy(interp_name, bprm_buf + eppnt->p_offset,
1779                        eppnt->p_filesz);
1780             } else {
1781                 retval = pread(image_fd, interp_name, eppnt->p_filesz,
1782                                eppnt->p_offset);
1783                 if (retval != eppnt->p_filesz) {
1784                     goto exit_perror;
1785                 }
1786             }
1787             if (interp_name[eppnt->p_filesz - 1] != 0) {
1788                 errmsg = "Invalid PT_INTERP entry";
1789                 goto exit_errmsg;
1790             }
1791             *pinterp_name = interp_name;
1792         }
1793     }
1794 
1795     if (info->end_data == 0) {
1796         info->start_data = info->end_code;
1797         info->end_data = info->end_code;
1798         info->brk = info->end_code;
1799     }
1800 
1801     if (qemu_log_enabled()) {
1802         load_symbols(ehdr, image_fd, load_bias);
1803     }
1804 
1805     close(image_fd);
1806     return;
1807 
1808  exit_read:
1809     if (retval >= 0) {
1810         errmsg = "Incomplete read of file header";
1811         goto exit_errmsg;
1812     }
1813  exit_perror:
1814     errmsg = strerror(errno);
1815  exit_errmsg:
1816     fprintf(stderr, "%s: %s\n", image_name, errmsg);
1817     exit(-1);
1818 }
1819 
1820 static void load_elf_interp(const char *filename, struct image_info *info,
1821                             char bprm_buf[BPRM_BUF_SIZE])
1822 {
1823     int fd, retval;
1824 
1825     fd = open(path(filename), O_RDONLY);
1826     if (fd < 0) {
1827         goto exit_perror;
1828     }
1829 
1830     retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
1831     if (retval < 0) {
1832         goto exit_perror;
1833     }
1834     if (retval < BPRM_BUF_SIZE) {
1835         memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
1836     }
1837 
1838     load_elf_image(filename, fd, info, NULL, bprm_buf);
1839     return;
1840 
1841  exit_perror:
1842     fprintf(stderr, "%s: %s\n", filename, strerror(errno));
1843     exit(-1);
1844 }
1845 
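/*
 * bsearch() comparator used by lookup_symbolxx() below: the key is a
 * guest address and the array elements are struct elf_sym entries sorted
 * by st_value; return 0 when the address falls within
 * [st_value, st_value + st_size).
 */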
1846 static int symfind(const void *s0, const void *s1)
1847 {
1848     target_ulong addr = *(target_ulong *)s0;
1849     struct elf_sym *sym = (struct elf_sym *)s1;
1850     int result = 0;
1851     if (addr < sym->st_value) {
1852         result = -1;
1853     } else if (addr >= sym->st_value + sym->st_size) {
1854         result = 1;
1855     }
1856     return result;
1857 }
1858 
1859 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1860 {
1861 #if ELF_CLASS == ELFCLASS32
1862     struct elf_sym *syms = s->disas_symtab.elf32;
1863 #else
1864     struct elf_sym *syms = s->disas_symtab.elf64;
1865 #endif
1866 
1867     /* binary search */
1868     struct elf_sym *sym;
1869 
1870     sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
1871     if (sym != NULL) {
1872         return s->disas_strtab + sym->st_name;
1873     }
1874 
1875     return "";
1876 }
1877 
1878 /* FIXME: This should use elf_ops.h  */
1879 static int symcmp(const void *s0, const void *s1)
1880 {
1881     struct elf_sym *sym0 = (struct elf_sym *)s0;
1882     struct elf_sym *sym1 = (struct elf_sym *)s1;
1883     return (sym0->st_value < sym1->st_value)
1884         ? -1
1885         : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1886 }
1887 
1888 /* Best attempt to load symbols from this ELF object. */
1889 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
1890 {
1891     int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
1892     struct elf_shdr *shdr;
1893     char *strings = NULL;
1894     struct syminfo *s = NULL;
1895     struct elf_sym *new_syms, *syms = NULL;
1896 
1897     shnum = hdr->e_shnum;
1898     i = shnum * sizeof(struct elf_shdr);
1899     shdr = (struct elf_shdr *)alloca(i);
1900     if (pread(fd, shdr, i, hdr->e_shoff) != i) {
1901         return;
1902     }
1903 
1904     bswap_shdr(shdr, shnum);
1905     for (i = 0; i < shnum; ++i) {
1906         if (shdr[i].sh_type == SHT_SYMTAB) {
1907             sym_idx = i;
1908             str_idx = shdr[i].sh_link;
1909             goto found;
1910         }
1911     }
1912 
1913     /* There will be no symbol table if the file was stripped.  */
1914     return;
1915 
1916  found:
1917     /* Now we know where the strtab and symtab are.  Snarf them.  */
1918     s = malloc(sizeof(*s));
1919     if (!s) {
1920         goto give_up;
1921     }
1922 
1923     i = shdr[str_idx].sh_size;
1924     s->disas_strtab = strings = malloc(i);
1925     if (!strings || pread(fd, strings, i, shdr[str_idx].sh_offset) != i) {
1926         goto give_up;
1927     }
1928 
1929     i = shdr[sym_idx].sh_size;
1930     syms = malloc(i);
1931     if (!syms || pread(fd, syms, i, shdr[sym_idx].sh_offset) != i) {
1932         goto give_up;
1933     }
1934 
1935     nsyms = i / sizeof(struct elf_sym);
1936     for (i = 0; i < nsyms; ) {
1937         bswap_sym(syms + i);
1938         /* Throw away entries which we do not need.  */
1939         if (syms[i].st_shndx == SHN_UNDEF
1940             || syms[i].st_shndx >= SHN_LORESERVE
1941             || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1942             if (i < --nsyms) {
1943                 syms[i] = syms[nsyms];
1944             }
1945         } else {
1946 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1947             /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
1948             syms[i].st_value &= ~(target_ulong)1;
1949 #endif
1950             syms[i].st_value += load_bias;
1951             i++;
1952         }
1953     }
1954 
1955     /* No "useful" symbol.  */
1956     if (nsyms == 0) {
1957         goto give_up;
1958     }
1959 
1960     /* Attempt to free the storage associated with the local symbols
1961        that we threw away.  Whether or not this has any effect on the
1962        memory allocation depends on the malloc implementation and how
1963        many symbols we managed to discard.  */
1964     new_syms = realloc(syms, nsyms * sizeof(*syms));
1965     if (new_syms == NULL) {
1966         goto give_up;
1967     }
1968     syms = new_syms;
1969 
1970     qsort(syms, nsyms, sizeof(*syms), symcmp);
1971 
1972     s->disas_num_syms = nsyms;
1973 #if ELF_CLASS == ELFCLASS32
1974     s->disas_symtab.elf32 = syms;
1975 #else
1976     s->disas_symtab.elf64 = syms;
1977 #endif
1978     s->lookup_symbol = lookup_symbolxx;
1979     s->next = syminfos;
1980     syminfos = s;
1981 
1982     return;
1983 
1984 give_up:
1985     free(s);
1986     free(strings);
1987     free(syms);
1988 }
1989 
1990 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1991                     struct image_info * info)
1992 {
1993     struct image_info interp_info;
1994     struct elfhdr elf_ex;
1995     char *elf_interpreter = NULL;
1996 
1997     info->start_mmap = (abi_ulong)ELF_START_MMAP;
1998     info->mmap = 0;
1999     info->rss = 0;
2000 
2001     load_elf_image(bprm->filename, bprm->fd, info,
2002                    &elf_interpreter, bprm->buf);
2003 
2004     /* ??? We need a copy of the elf header for passing to create_elf_tables.
2005        If we do nothing, we'll have overwritten it by the time we reuse
2006        bprm->buf to load the interpreter.  */
2007     elf_ex = *(struct elfhdr *)bprm->buf;
2008 
2009     bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
2010     bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
2011     bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
2012     if (!bprm->p) {
2013         fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
2014         exit(-1);
2015     }
2016 
2017     /* Do this so that we can load the interpreter, if need be.  We will
2018        change some of these later */
2019     bprm->p = setup_arg_pages(bprm->p, bprm, info);
2020 
2021     if (elf_interpreter) {
2022         load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
2023 
2024         /* If the program interpreter is one of these two, then assume
2025            an iBCS2 image.  Otherwise assume a native linux image.  */
2026 
2027         if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
2028             || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
2029             info->personality = PER_SVR4;
2030 
2031             /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
2032                and some applications "depend" upon this behavior.  Since
2033                we do not have the power to recompile these, we emulate
2034                the SVr4 behavior.  Sigh.  */
2035             target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
2036                         MAP_FIXED | MAP_PRIVATE, -1, 0);
2037         }
2038     }
2039 
2040     bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
2041                                 info, (elf_interpreter ? &interp_info : NULL));
2042     info->start_stack = bprm->p;
2043 
2044     /* If we have an interpreter, set that as the program's entry point.
2045        Copy the load_bias as well, to help PPC64 interpret the entry
2046        point as a function descriptor.  Do this after creating elf tables
2047        so that we copy the original program entry point into the AUXV.  */
2048     if (elf_interpreter) {
2049         info->load_bias = interp_info.load_bias;
2050         info->entry = interp_info.entry;
2051         free(elf_interpreter);
2052     }
2053 
2054 #ifdef USE_ELF_CORE_DUMP
2055     bprm->core_dump = &elf_core_dump;
2056 #endif
2057 
2058     return 0;
2059 }
2060 
2061 #ifdef USE_ELF_CORE_DUMP
2062 /*
2063  * Definitions to generate Intel SVR4-like core files.
2064  * These mostly have the same names as the SVR4 types with "target_elf_"
2065  * tacked on the front to prevent clashes with linux definitions,
2066  * and the typedef forms have been avoided.  This is mostly like
2067  * the SVR4 structure, but more Linuxy, with things that Linux does
2068  * not support and which gdb doesn't really use excluded.
2069  *
2070  * Fields that we don't dump (their contents are zero) in linux-user qemu
2071  * are marked with XXX.
2072  *
2073  * The core dump code is copied from the Linux kernel (fs/binfmt_elf.c).
2074  *
2075  * Porting ELF coredump support to a target is a (quite) simple process.  First
2076  * you define USE_ELF_CORE_DUMP in the target's ELF code (where init_thread()
2077  * for the target resides):
2078  *
2079  * #define USE_ELF_CORE_DUMP
2080  *
2081  * Next you define the type of the register set used for dumping.  The ELF
2082  * specification says it must be an array of elf_greg_t with ELF_NREG elements.
2083  *
2084  * typedef <target_regtype> target_elf_greg_t;
2085  * #define ELF_NREG <number of registers>
2086  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
2087  *
2088  * The last step is to implement a target-specific function that copies
2089  * registers from the given CPU into that register set.  The prototype is:
2090  *
2091  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
2092  *                                const CPUArchState *env);
2093  *
2094  * Parameters:
2095  *     regs - copy register values into here (allocated and zeroed by caller)
2096  *     env - copy registers from here
2097  *
2098  * An example for the ARM target is provided in this file.
2099  */
2100 
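/*
 * Purely illustrative sketch of the three steps above, assuming a
 * hypothetical target with 16 general-purpose registers plus a program
 * counter; the field names (env->regs[], env->pc) are placeholders and a
 * real target uses whatever its CPUArchState actually provides (see the
 * ARM example in this file):
 *
 *     #define USE_ELF_CORE_DUMP
 *     #define ELF_NREG 17
 *     typedef abi_ulong target_elf_greg_t;
 *     typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 *     static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                    const CPUArchState *env)
 *     {
 *         int i;
 *
 *         for (i = 0; i < 16; i++) {
 *             (*regs)[i] = tswapreg(env->regs[i]);
 *         }
 *         (*regs)[16] = tswapreg(env->pc);
 *     }
 */
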
2101 /* An ELF note in memory */
2102 struct memelfnote {
2103     const char *name;
2104     size_t     namesz;
2105     size_t     namesz_rounded;
2106     int        type;
2107     size_t     datasz;
2108     size_t     datasz_rounded;
2109     void       *data;
2110     size_t     notesz;
2111 };
2112 
2113 struct target_elf_siginfo {
2114     abi_int    si_signo; /* signal number */
2115     abi_int    si_code;  /* extra code */
2116     abi_int    si_errno; /* errno */
2117 };
2118 
2119 struct target_elf_prstatus {
2120     struct target_elf_siginfo pr_info;      /* Info associated with signal */
2121     abi_short          pr_cursig;    /* Current signal */
2122     abi_ulong          pr_sigpend;   /* XXX */
2123     abi_ulong          pr_sighold;   /* XXX */
2124     target_pid_t       pr_pid;
2125     target_pid_t       pr_ppid;
2126     target_pid_t       pr_pgrp;
2127     target_pid_t       pr_sid;
2128     struct target_timeval pr_utime;  /* XXX User time */
2129     struct target_timeval pr_stime;  /* XXX System time */
2130     struct target_timeval pr_cutime; /* XXX Cumulative user time */
2131     struct target_timeval pr_cstime; /* XXX Cumulative system time */
2132     target_elf_gregset_t      pr_reg;       /* GP registers */
2133     abi_int            pr_fpvalid;   /* XXX */
2134 };
2135 
2136 #define ELF_PRARGSZ     (80) /* Number of chars for args */
2137 
2138 struct target_elf_prpsinfo {
2139     char         pr_state;       /* numeric process state */
2140     char         pr_sname;       /* char for pr_state */
2141     char         pr_zomb;        /* zombie */
2142     char         pr_nice;        /* nice val */
2143     abi_ulong    pr_flag;        /* flags */
2144     target_uid_t pr_uid;
2145     target_gid_t pr_gid;
2146     target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
2147     /* Lots missing */
2148     char    pr_fname[16];           /* filename of executable */
2149     char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
2150 };
2151 
2152 /* Here is the structure in which the status of each thread is captured. */
2153 struct elf_thread_status {
2154     QTAILQ_ENTRY(elf_thread_status)  ets_link;
2155     struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
2156 #if 0
2157     elf_fpregset_t fpu;             /* NT_PRFPREG */
2158     struct task_struct *thread;
2159     elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
2160 #endif
2161     struct memelfnote notes[1];
2162     int num_notes;
2163 };
2164 
2165 struct elf_note_info {
2166     struct memelfnote   *notes;
2167     struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
2168     struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
2169 
2170     QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
2171 #if 0
2172     /*
2173      * Current version of ELF coredump doesn't support
2174      * dumping fp regs etc.
2175      */
2176     elf_fpregset_t *fpu;
2177     elf_fpxregset_t *xfpu;
2178     int thread_status_size;
2179 #endif
2180     int notes_size;
2181     int numnote;
2182 };
2183 
2184 struct vm_area_struct {
2185     abi_ulong   vma_start;  /* start vaddr of memory region */
2186     abi_ulong   vma_end;    /* end vaddr of memory region */
2187     abi_ulong   vma_flags;  /* protection etc. flags for the region */
2188     QTAILQ_ENTRY(vm_area_struct) vma_link;
2189 };
2190 
2191 struct mm_struct {
2192     QTAILQ_HEAD(, vm_area_struct) mm_mmap;
2193     int mm_count;           /* number of mappings */
2194 };
2195 
2196 static struct mm_struct *vma_init(void);
2197 static void vma_delete(struct mm_struct *);
2198 static int vma_add_mapping(struct mm_struct *, abi_ulong,
2199                            abi_ulong, abi_ulong);
2200 static int vma_get_mapping_count(const struct mm_struct *);
2201 static struct vm_area_struct *vma_first(const struct mm_struct *);
2202 static struct vm_area_struct *vma_next(struct vm_area_struct *);
2203 static abi_ulong vma_dump_size(const struct vm_area_struct *);
2204 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2205                       unsigned long flags);
2206 
2207 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
2208 static void fill_note(struct memelfnote *, const char *, int,
2209                       unsigned int, void *);
2210 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
2211 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
2212 static void fill_auxv_note(struct memelfnote *, const TaskState *);
2213 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
2214 static size_t note_size(const struct memelfnote *);
2215 static void free_note_info(struct elf_note_info *);
2216 static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
2217 static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
2218 static int core_dump_filename(const TaskState *, char *, size_t);
2219 
2220 static int dump_write(int, const void *, size_t);
2221 static int write_note(struct memelfnote *, int);
2222 static int write_note_info(struct elf_note_info *, int);
2223 
2224 #ifdef BSWAP_NEEDED
2225 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
2226 {
2227     prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
2228     prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
2229     prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
2230     prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
2231     prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend);
2232     prstatus->pr_sighold = tswapal(prstatus->pr_sighold);
2233     prstatus->pr_pid = tswap32(prstatus->pr_pid);
2234     prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
2235     prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
2236     prstatus->pr_sid = tswap32(prstatus->pr_sid);
2237     /* cpu times are not filled, so we skip them */
2238     /* regs should be in correct format already */
2239     prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
2240 }
2241 
2242 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
2243 {
2244     psinfo->pr_flag = tswapal(psinfo->pr_flag);
2245     psinfo->pr_uid = tswap16(psinfo->pr_uid);
2246     psinfo->pr_gid = tswap16(psinfo->pr_gid);
2247     psinfo->pr_pid = tswap32(psinfo->pr_pid);
2248     psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
2249     psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
2250     psinfo->pr_sid = tswap32(psinfo->pr_sid);
2251 }
2252 
2253 static void bswap_note(struct elf_note *en)
2254 {
2255     bswap32s(&en->n_namesz);
2256     bswap32s(&en->n_descsz);
2257     bswap32s(&en->n_type);
2258 }
2259 #else
2260 static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
2261 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
2262 static inline void bswap_note(struct elf_note *en) { }
2263 #endif /* BSWAP_NEEDED */
2264 
2265 /*
2266  * Minimal support for Linux memory regions.  These are needed
2267  * when we are finding out exactly what memory belongs to the
2268  * emulated process.  No locks are needed here, as long as the
2269  * thread that received the signal is stopped.
2270  */
2271 
2272 static struct mm_struct *vma_init(void)
2273 {
2274     struct mm_struct *mm;
2275 
2276     if ((mm = g_malloc(sizeof (*mm))) == NULL)
2277         return (NULL);
2278 
2279     mm->mm_count = 0;
2280     QTAILQ_INIT(&mm->mm_mmap);
2281 
2282     return (mm);
2283 }
2284 
2285 static void vma_delete(struct mm_struct *mm)
2286 {
2287     struct vm_area_struct *vma;
2288 
2289     while ((vma = vma_first(mm)) != NULL) {
2290         QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2291         g_free(vma);
2292     }
2293     g_free(mm);
2294 }
2295 
2296 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2297                            abi_ulong end, abi_ulong flags)
2298 {
2299     struct vm_area_struct *vma;
2300 
2301     if ((vma = g_malloc0(sizeof (*vma))) == NULL)
2302         return (-1);
2303 
2304     vma->vma_start = start;
2305     vma->vma_end = end;
2306     vma->vma_flags = flags;
2307 
2308     QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2309     mm->mm_count++;
2310 
2311     return (0);
2312 }
2313 
2314 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2315 {
2316     return (QTAILQ_FIRST(&mm->mm_mmap));
2317 }
2318 
2319 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2320 {
2321     return (QTAILQ_NEXT(vma, vma_link));
2322 }
2323 
2324 static int vma_get_mapping_count(const struct mm_struct *mm)
2325 {
2326     return (mm->mm_count);
2327 }
2328 
2329 /*
2330  * Calculate file (dump) size of given memory region.
2331  */
2332 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2333 {
2334     /* if we cannot even read the first page, skip it */
2335     if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2336         return (0);
2337 
2338     /*
2339      * Usually we don't dump executable pages as they contain
2340      * non-writable code that the debugger can read directly from
2341      * the target library etc.  However, thread stacks are also
2342      * marked executable, so we read in the first page of a given
2343      * region and check whether it contains an ELF header.  If
2344      * there is no ELF header, we dump the region.
2345      */
2346     if (vma->vma_flags & PROT_EXEC) {
2347         char page[TARGET_PAGE_SIZE];
2348 
2349         copy_from_user(page, vma->vma_start, sizeof (page));
2350         if ((page[EI_MAG0] == ELFMAG0) &&
2351             (page[EI_MAG1] == ELFMAG1) &&
2352             (page[EI_MAG2] == ELFMAG2) &&
2353             (page[EI_MAG3] == ELFMAG3)) {
2354             /*
2355              * This mapping is possibly from an ELF binary.  Don't
2356              * dump it.
2357              */
2358             return (0);
2359         }
2360     }
2361 
2362     return (vma->vma_end - vma->vma_start);
2363 }
2364 
2365 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2366                       unsigned long flags)
2367 {
2368     struct mm_struct *mm = (struct mm_struct *)priv;
2369 
2370     vma_add_mapping(mm, start, end, flags);
2371     return (0);
2372 }
2373 
2374 static void fill_note(struct memelfnote *note, const char *name, int type,
2375                       unsigned int sz, void *data)
2376 {
2377     unsigned int namesz;
2378 
2379     namesz = strlen(name) + 1;
2380     note->name = name;
2381     note->namesz = namesz;
2382     note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2383     note->type = type;
2384     note->datasz = sz;
2385     note->datasz_rounded = roundup(sz, sizeof (int32_t));
2386 
2387     note->data = data;
2388 
2389     /*
2390      * We calculate the rounded-up note size here, as specified by
2391      * the ELF document.
2392      */
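    /*
     * Illustrative example, assuming the usual 12-byte on-disk note header
     * (three 32-bit words): a "CORE" note (5 name bytes including the NUL,
     * rounded up to 8) carrying a 64-byte payload has
     * notesz = 12 + 8 + 64 = 84 bytes.
     */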
2393     note->notesz = sizeof (struct elf_note) +
2394         note->namesz_rounded + note->datasz_rounded;
2395 }
2396 
2397 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2398                             uint32_t flags)
2399 {
2400     (void) memset(elf, 0, sizeof(*elf));
2401 
2402     (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2403     elf->e_ident[EI_CLASS] = ELF_CLASS;
2404     elf->e_ident[EI_DATA] = ELF_DATA;
2405     elf->e_ident[EI_VERSION] = EV_CURRENT;
2406     elf->e_ident[EI_OSABI] = ELF_OSABI;
2407 
2408     elf->e_type = ET_CORE;
2409     elf->e_machine = machine;
2410     elf->e_version = EV_CURRENT;
2411     elf->e_phoff = sizeof(struct elfhdr);
2412     elf->e_flags = flags;
2413     elf->e_ehsize = sizeof(struct elfhdr);
2414     elf->e_phentsize = sizeof(struct elf_phdr);
2415     elf->e_phnum = segs;
2416 
2417     bswap_ehdr(elf);
2418 }
2419 
2420 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2421 {
2422     phdr->p_type = PT_NOTE;
2423     phdr->p_offset = offset;
2424     phdr->p_vaddr = 0;
2425     phdr->p_paddr = 0;
2426     phdr->p_filesz = sz;
2427     phdr->p_memsz = 0;
2428     phdr->p_flags = 0;
2429     phdr->p_align = 0;
2430 
2431     bswap_phdr(phdr, 1);
2432 }
2433 
2434 static size_t note_size(const struct memelfnote *note)
2435 {
2436     return (note->notesz);
2437 }
2438 
2439 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2440                           const TaskState *ts, int signr)
2441 {
2442     (void) memset(prstatus, 0, sizeof (*prstatus));
2443     prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2444     prstatus->pr_pid = ts->ts_tid;
2445     prstatus->pr_ppid = getppid();
2446     prstatus->pr_pgrp = getpgrp();
2447     prstatus->pr_sid = getsid(0);
2448 
2449     bswap_prstatus(prstatus);
2450 }
2451 
2452 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2453 {
2454     char *base_filename;
2455     unsigned int i, len;
2456 
2457     (void) memset(psinfo, 0, sizeof (*psinfo));
2458 
2459     len = ts->info->arg_end - ts->info->arg_start;
2460     if (len >= ELF_PRARGSZ)
2461         len = ELF_PRARGSZ - 1;
2462     if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2463         return -EFAULT;
2464     for (i = 0; i < len; i++)
2465         if (psinfo->pr_psargs[i] == 0)
2466             psinfo->pr_psargs[i] = ' ';
2467     psinfo->pr_psargs[len] = 0;
2468 
2469     psinfo->pr_pid = getpid();
2470     psinfo->pr_ppid = getppid();
2471     psinfo->pr_pgrp = getpgrp();
2472     psinfo->pr_sid = getsid(0);
2473     psinfo->pr_uid = getuid();
2474     psinfo->pr_gid = getgid();
2475 
2476     base_filename = g_path_get_basename(ts->bprm->filename);
2477     /*
2478      * Using strncpy here is fine: at max-length,
2479      * this field is not NUL-terminated.
2480      */
2481     (void) strncpy(psinfo->pr_fname, base_filename,
2482                    sizeof(psinfo->pr_fname));
2483 
2484     g_free(base_filename);
2485     bswap_psinfo(psinfo);
2486     return (0);
2487 }
2488 
2489 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2490 {
2491     elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2492     elf_addr_t orig_auxv = auxv;
2493     void *ptr;
2494     int len = ts->info->auxv_len;
2495 
2496     /*
2497      * The auxiliary vector is stored on the target process stack.  It
2498      * contains {type, value} pairs that we need to dump into the note.
2499      * This is not strictly necessary, but we do it here for completeness.
2500      */
2501 
2502     /* read in whole auxv vector and copy it to memelfnote */
2503     ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2504     if (ptr != NULL) {
2505         fill_note(note, "CORE", NT_AUXV, len, ptr);
2506         unlock_user(ptr, auxv, len);
2507     }
2508 }
2509 
2510 /*
2511  * Constructs the name of the coredump file.  We use the following
2512  * convention for the name:
2513  *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2514  *
2515  * Returns 0 in case of success, -1 otherwise (errno is set).
2516  */
2517 static int core_dump_filename(const TaskState *ts, char *buf,
2518                               size_t bufsize)
2519 {
2520     char timestamp[64];
2521     char *filename = NULL;
2522     char *base_filename = NULL;
2523     struct timeval tv;
2524     struct tm tm;
2525 
2526     assert(bufsize >= PATH_MAX);
2527 
2528     if (gettimeofday(&tv, NULL) < 0) {
2529         (void) fprintf(stderr, "unable to get current timestamp: %s",
2530                        strerror(errno));
2531         return (-1);
2532     }
2533 
2534     filename = strdup(ts->bprm->filename);
2535     base_filename = strdup(basename(filename));
2536     (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2537                     localtime_r(&tv.tv_sec, &tm));
2538     (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2539                     base_filename, timestamp, (int)getpid());
2540     free(base_filename);
2541     free(filename);
2542 
2543     return (0);
2544 }
2545 
2546 static int dump_write(int fd, const void *ptr, size_t size)
2547 {
2548     const char *bufp = (const char *)ptr;
2549     ssize_t bytes_written, bytes_left;
2550     struct rlimit dumpsize;
2551     off_t pos;
2552 
2553     bytes_written = 0;
2554     getrlimit(RLIMIT_CORE, &dumpsize);
2555     if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2556         if (errno == ESPIPE) { /* not a seekable stream */
2557             bytes_left = size;
2558         } else {
2559             return pos;
2560         }
2561     } else {
2562         if (dumpsize.rlim_cur <= pos) {
2563             return -1;
2564         } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2565             bytes_left = size;
2566         } else {
2567             size_t limit_left = dumpsize.rlim_cur - pos;
2568             bytes_left = limit_left >= size ? size : limit_left;
2569         }
2570     }
2571 
2572     /*
2573      * Under normal conditions a single write(2) should do, but
2574      * for sockets etc. this loop is more portable.
2575      */
2576     do {
2577         bytes_written = write(fd, bufp, bytes_left);
2578         if (bytes_written < 0) {
2579             if (errno == EINTR)
2580                 continue;
2581             return (-1);
2582         } else if (bytes_written == 0) { /* eof */
2583             return (-1);
2584         }
2585         bufp += bytes_written;
2586         bytes_left -= bytes_written;
2587     } while (bytes_left > 0);
2588 
2589     return (0);
2590 }
2591 
2592 static int write_note(struct memelfnote *men, int fd)
2593 {
2594     struct elf_note en;
2595 
2596     en.n_namesz = men->namesz;
2597     en.n_type = men->type;
2598     en.n_descsz = men->datasz;
2599 
2600     bswap_note(&en);
2601 
2602     if (dump_write(fd, &en, sizeof(en)) != 0)
2603         return (-1);
2604     if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2605         return (-1);
2606     if (dump_write(fd, men->data, men->datasz_rounded) != 0)
2607         return (-1);
2608 
2609     return (0);
2610 }
2611 
2612 static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
2613 {
2614     TaskState *ts = (TaskState *)env->opaque;
2615     struct elf_thread_status *ets;
2616 
2617     ets = g_malloc0(sizeof (*ets));
2618     ets->num_notes = 1; /* only prstatus is dumped */
2619     fill_prstatus(&ets->prstatus, ts, 0);
2620     elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2621     fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2622               &ets->prstatus);
2623 
2624     QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2625 
2626     info->notes_size += note_size(&ets->notes[0]);
2627 }
2628 
2629 static int fill_note_info(struct elf_note_info *info,
2630                           long signr, const CPUArchState *env)
2631 {
2632 #define NUMNOTES 3
2633     CPUState *cpu = NULL;
2634     TaskState *ts = (TaskState *)env->opaque;
2635     int i;
2636 
2637     (void) memset(info, 0, sizeof (*info));
2638 
2639     QTAILQ_INIT(&info->thread_list);
2640 
2641     info->notes = g_malloc0(NUMNOTES * sizeof (struct memelfnote));
2642     if (info->notes == NULL)
2643         return (-ENOMEM);
2644     info->prstatus = g_malloc0(sizeof (*info->prstatus));
2645     if (info->prstatus == NULL)
2646         return (-ENOMEM);
2647     info->psinfo = g_malloc0(sizeof (*info->psinfo));
2648     if (info->psinfo == NULL)
2649         return (-ENOMEM);
2650 
2651     /*
2652      * First fill in status (and registers) of current thread
2653      * including process info & aux vector.
2654      */
2655     fill_prstatus(info->prstatus, ts, signr);
2656     elf_core_copy_regs(&info->prstatus->pr_reg, env);
2657     fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2658               sizeof (*info->prstatus), info->prstatus);
2659     fill_psinfo(info->psinfo, ts);
2660     fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2661               sizeof (*info->psinfo), info->psinfo);
2662     fill_auxv_note(&info->notes[2], ts);
2663     info->numnote = NUMNOTES;
2664 
2665     info->notes_size = 0;
2666     for (i = 0; i < info->numnote; i++)
2667         info->notes_size += note_size(&info->notes[i]);
2668 
2669     /* read and fill status of all threads */
2670     cpu_list_lock();
2671     for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2672         if (cpu == thread_cpu) {
2673             continue;
2674         }
2675         fill_thread_info(info, (CPUArchState *)cpu->env_ptr);
2676     }
2677     cpu_list_unlock();
2678 
2679     return (0);
2680 }
2681 
2682 static void free_note_info(struct elf_note_info *info)
2683 {
2684     struct elf_thread_status *ets;
2685 
2686     while (!QTAILQ_EMPTY(&info->thread_list)) {
2687         ets = QTAILQ_FIRST(&info->thread_list);
2688         QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2689         g_free(ets);
2690     }
2691 
2692     g_free(info->prstatus);
2693     g_free(info->psinfo);
2694     g_free(info->notes);
2695 }
2696 
2697 static int write_note_info(struct elf_note_info *info, int fd)
2698 {
2699     struct elf_thread_status *ets;
2700     int i, error = 0;
2701 
2702     /* write prstatus, psinfo and auxv for current thread */
2703     for (i = 0; i < info->numnote; i++)
2704         if ((error = write_note(&info->notes[i], fd)) != 0)
2705             return (error);
2706 
2707     /* write prstatus for each thread */
2708     for (ets = info->thread_list.tqh_first; ets != NULL;
2709          ets = ets->ets_link.tqe_next) {
2710         if ((error = write_note(&ets->notes[0], fd)) != 0)
2711             return (error);
2712     }
2713 
2714     return (0);
2715 }
2716 
2717 /*
2718  * Write out ELF coredump.
2719  *
2720  * See documentation of ELF object file format in:
2721  * http://www.caldera.com/developers/devspecs/gabi41.pdf
2722  *
2723  * The coredump format in Linux is as follows:
2724  *
2725  * 0   +----------------------+         \
2726  *     | ELF header           | ET_CORE  |
2727  *     +----------------------+          |
2728  *     | ELF program headers  |          |--- headers
2729  *     | - NOTE section       |          |
2730  *     | - PT_LOAD sections   |          |
2731  *     +----------------------+         /
2732  *     | NOTEs:               |
2733  *     | - NT_PRSTATUS        |
2734  *     | - NT_PRPSINFO        |
2735  *     | - NT_AUXV            |
2736  *     +----------------------+ <-- aligned to target page
2737  *     | Process memory dump  |
2738  *     :                      :
2739  *     .                      .
2740  *     :                      :
2741  *     |                      |
2742  *     +----------------------+
2743  *
2744  * NT_PRSTATUS -> struct elf_prstatus (per thread)
2745  * NT_PRPSINFO -> struct elf_prpsinfo
2746  * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
2747  *
2748  * The format follows the System V format as closely as possible.  Current
2749  * version limitations are as follows:
2750  *     - no floating point registers are dumped
2751  *
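 * Concretely, mirroring the offset arithmetic in elf_core_dump() below:
 * with segs memory mappings the headers occupy sizeof(struct elfhdr) +
 * (segs + 1) * sizeof(struct elf_phdr) bytes, the notes follow immediately
 * after the program headers, and the memory dump starts at the next
 * ELF_EXEC_PAGESIZE boundary.
 *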
2752  * The function returns 0 on success, a negative errno otherwise.
2753  *
2754  * TODO: make this also work at runtime: it should be
2755  * possible to force a coredump from a running process and then
2756  * continue processing.  For example, QEMU could set up a SIGUSR2
2757  * handler (provided that the target process hasn't registered a
2758  * handler for that signal) that does the dump when the signal arrives.
2759  */
2760 static int elf_core_dump(int signr, const CPUArchState *env)
2761 {
2762     const TaskState *ts = (const TaskState *)env->opaque;
2763     struct vm_area_struct *vma = NULL;
2764     char corefile[PATH_MAX];
2765     struct elf_note_info info;
2766     struct elfhdr elf;
2767     struct elf_phdr phdr;
2768     struct rlimit dumpsize;
2769     struct mm_struct *mm = NULL;
2770     off_t offset = 0, data_offset = 0;
2771     int segs = 0;
2772     int fd = -1;
2773 
2774     errno = 0;
2775     getrlimit(RLIMIT_CORE, &dumpsize);
2776     if (dumpsize.rlim_cur == 0)
2777         return 0;
2778 
2779     if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2780         return (-errno);
2781 
2782     if ((fd = open(corefile, O_WRONLY | O_CREAT,
2783                    S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2784         return (-errno);
2785 
2786     /*
2787      * Walk through the target process memory mappings and
2788      * set up a structure containing this information.  After
2789      * this point the vma_xxx functions can be used.
2790      */
2791     if ((mm = vma_init()) == NULL)
2792         goto out;
2793 
2794     walk_memory_regions(mm, vma_walker);
2795     segs = vma_get_mapping_count(mm);
2796 
2797     /*
2798      * Construct a valid coredump ELF header.  We also
2799      * add one more segment for the notes.
2800      */
2801     fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2802     if (dump_write(fd, &elf, sizeof (elf)) != 0)
2803         goto out;
2804 
2805     /* fill in in-memory version of notes */
2806     if (fill_note_info(&info, signr, env) < 0)
2807         goto out;
2808 
2809     offset += sizeof (elf);                             /* elf header */
2810     offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
2811 
2812     /* write out notes program header */
2813     fill_elf_note_phdr(&phdr, info.notes_size, offset);
2814 
2815     offset += info.notes_size;
2816     if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2817         goto out;
2818 
2819     /*
2820      * The ELF specification wants data to start at a page boundary,
2821      * so we align it here.
2822      */
2823     data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2824 
2825     /*
2826      * Write program headers for memory regions mapped in
2827      * the target process.
2828      */
2829     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2830         (void) memset(&phdr, 0, sizeof (phdr));
2831 
2832         phdr.p_type = PT_LOAD;
2833         phdr.p_offset = offset;
2834         phdr.p_vaddr = vma->vma_start;
2835         phdr.p_paddr = 0;
2836         phdr.p_filesz = vma_dump_size(vma);
2837         offset += phdr.p_filesz;
2838         phdr.p_memsz = vma->vma_end - vma->vma_start;
2839         phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2840         if (vma->vma_flags & PROT_WRITE)
2841             phdr.p_flags |= PF_W;
2842         if (vma->vma_flags & PROT_EXEC)
2843             phdr.p_flags |= PF_X;
2844         phdr.p_align = ELF_EXEC_PAGESIZE;
2845 
2846         bswap_phdr(&phdr, 1);
2847         dump_write(fd, &phdr, sizeof (phdr));
2848     }
2849 
2850     /*
2851      * Next we write notes just after program headers.  No
2852      * alignment needed here.
2853      */
2854     if (write_note_info(&info, fd) < 0)
2855         goto out;
2856 
2857     /* align data to page boundary */
2858     if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2859         goto out;
2860 
2861     /*
2862      * Finally we can dump process memory into corefile as well.
2863      */
2864     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2865         abi_ulong addr;
2866         abi_ulong end;
2867 
2868         end = vma->vma_start + vma_dump_size(vma);
2869 
2870         for (addr = vma->vma_start; addr < end;
2871              addr += TARGET_PAGE_SIZE) {
2872             char page[TARGET_PAGE_SIZE];
2873             int error;
2874 
2875             /*
2876              *  Read in page from target process memory and
2877              *  write it to coredump file.
2878              */
2879             error = copy_from_user(page, addr, sizeof (page));
2880             if (error != 0) {
2881                 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2882                                addr);
2883                 errno = -error;
2884                 goto out;
2885             }
2886             if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2887                 goto out;
2888         }
2889     }
2890 
2891  out:
2892     free_note_info(&info);
2893     if (mm != NULL)
2894         vma_delete(mm);
2895     (void) close(fd);
2896 
2897     if (errno != 0)
2898         return (-errno);
2899     return (0);
2900 }
2901 #endif /* USE_ELF_CORE_DUMP */
2902 
2903 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2904 {
2905     init_thread(regs, infop);
2906 }
2907