1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4 
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15 
16 #include "qemu.h"
17 #include "disas.h"
18 
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27 
28 #define ELF_OSABI   ELFOSABI_SYSV
29 
30 /* from personality.h */
31 
32 /*
33  * Flags for bug emulation.
34  *
35  * These occupy the top three bytes.
36  */
37 enum {
38     ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
39     FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
40                                            descriptors (signal handling) */
41     MMAP_PAGE_ZERO =    0x0100000,
42     ADDR_COMPAT_LAYOUT = 0x0200000,
43     READ_IMPLIES_EXEC = 0x0400000,
44     ADDR_LIMIT_32BIT =  0x0800000,
45     SHORT_INODE =       0x1000000,
46     WHOLE_SECONDS =     0x2000000,
47     STICKY_TIMEOUTS =   0x4000000,
48     ADDR_LIMIT_3GB =    0x8000000,
49 };
50 
51 /*
52  * Personality types.
53  *
54  * These go in the low byte.  Avoid using the top bit, it will
55  * conflict with error returns.
56  */
57 enum {
58     PER_LINUX =         0x0000,
59     PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
60     PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
61     PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
62     PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
63     PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
64     PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
65     PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
66     PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
67     PER_BSD =           0x0006,
68     PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
69     PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
70     PER_LINUX32 =       0x0008,
71     PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
72     PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
73     PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
74     PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
75     PER_RISCOS =        0x000c,
76     PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
77     PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
78     PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
79     PER_HPUX =          0x0010,
80     PER_MASK =          0x00ff,
81 };
82 
83 /*
84  * Return the base personality without flags.
85  */
86 #define personality(pers)       (pers & PER_MASK)
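/* For example, personality(PER_SVR4) evaluates to 0x01: the bug-emulation
   flags in the upper bytes are masked off, leaving only the base
   personality in the low byte. */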
87 
88 /* this flag is ineffective under linux too, should be deleted */
89 #ifndef MAP_DENYWRITE
90 #define MAP_DENYWRITE 0
91 #endif
92 
93 /* should probably go in elf.h */
94 #ifndef ELIBBAD
95 #define ELIBBAD 80
96 #endif
97 
98 #ifdef TARGET_WORDS_BIGENDIAN
99 #define ELF_DATA        ELFDATA2MSB
100 #else
101 #define ELF_DATA        ELFDATA2LSB
102 #endif
103 
104 typedef target_ulong    target_elf_greg_t;
105 #ifdef USE_UID16
106 typedef target_ushort   target_uid_t;
107 typedef target_ushort   target_gid_t;
108 #else
109 typedef target_uint     target_uid_t;
110 typedef target_uint     target_gid_t;
111 #endif
112 typedef target_int      target_pid_t;
113 
114 #ifdef TARGET_I386
115 
116 #define ELF_PLATFORM get_elf_platform()
117 
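/* Derive the platform string from the guest CPUID family: families 3-6
   yield "i386" through "i686", anything newer is clamped to "i686", and
   older families fall back to the default "i386". */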
118 static const char *get_elf_platform(void)
119 {
120     static char elf_platform[] = "i386";
121     int family = (thread_env->cpuid_version >> 8) & 0xff;
122     if (family > 6)
123         family = 6;
124     if (family >= 3)
125         elf_platform[1] = '0' + family;
126     return elf_platform;
127 }
128 
129 #define ELF_HWCAP get_elf_hwcap()
130 
131 static uint32_t get_elf_hwcap(void)
132 {
133     return thread_env->cpuid_features;
134 }
135 
136 #ifdef TARGET_X86_64
137 #define ELF_START_MMAP 0x2aaaaab000ULL
138 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
139 
140 #define ELF_CLASS      ELFCLASS64
141 #define ELF_ARCH       EM_X86_64
142 
143 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
144 {
145     regs->rax = 0;
146     regs->rsp = infop->start_stack;
147     regs->rip = infop->entry;
148 }
149 
150 #define ELF_NREG    27
151 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
152 
153 /*
154  * Note that ELF_NREG should be 29 as there should be room for
155  * TRAPNO and ERR "registers" as well but linux doesn't dump
156  * those.
157  *
158  * See linux kernel: arch/x86/include/asm/elf.h
159  */
160 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
161 {
162     (*regs)[0] = env->regs[15];
163     (*regs)[1] = env->regs[14];
164     (*regs)[2] = env->regs[13];
165     (*regs)[3] = env->regs[12];
166     (*regs)[4] = env->regs[R_EBP];
167     (*regs)[5] = env->regs[R_EBX];
168     (*regs)[6] = env->regs[11];
169     (*regs)[7] = env->regs[10];
170     (*regs)[8] = env->regs[9];
171     (*regs)[9] = env->regs[8];
172     (*regs)[10] = env->regs[R_EAX];
173     (*regs)[11] = env->regs[R_ECX];
174     (*regs)[12] = env->regs[R_EDX];
175     (*regs)[13] = env->regs[R_ESI];
176     (*regs)[14] = env->regs[R_EDI];
177     (*regs)[15] = env->regs[R_EAX]; /* XXX */
178     (*regs)[16] = env->eip;
179     (*regs)[17] = env->segs[R_CS].selector & 0xffff;
180     (*regs)[18] = env->eflags;
181     (*regs)[19] = env->regs[R_ESP];
182     (*regs)[20] = env->segs[R_SS].selector & 0xffff;
183     (*regs)[21] = env->segs[R_FS].selector & 0xffff;
184     (*regs)[22] = env->segs[R_GS].selector & 0xffff;
185     (*regs)[23] = env->segs[R_DS].selector & 0xffff;
186     (*regs)[24] = env->segs[R_ES].selector & 0xffff;
187     (*regs)[25] = env->segs[R_FS].selector & 0xffff;
188     (*regs)[26] = env->segs[R_GS].selector & 0xffff;
189 }
190 
191 #else
192 
193 #define ELF_START_MMAP 0x80000000
194 
195 /*
196  * This is used to ensure we don't load something for the wrong architecture.
197  */
198 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
199 
200 /*
201  * These are used to set parameters in the core dumps.
202  */
203 #define ELF_CLASS       ELFCLASS32
204 #define ELF_ARCH        EM_386
205 
206 static inline void init_thread(struct target_pt_regs *regs,
207                                struct image_info *infop)
208 {
209     regs->esp = infop->start_stack;
210     regs->eip = infop->entry;
211 
212     /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
213        starts %edx contains a pointer to a function which might be
214        registered using `atexit'.  This provides a means for the
215        dynamic linker to call DT_FINI functions for shared libraries
216        that have been loaded before the code runs.
217 
218        A value of 0 tells us we have no such handler.  */
219     regs->edx = 0;
220 }
221 
222 #define ELF_NREG    17
223 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
224 
225 /*
226  * Note that ELF_NREG should be 19 as there should be room for
227  * TRAPNO and ERR "registers" as well but linux doesn't dump
228  * those.
229  *
230  * See linux kernel: arch/x86/include/asm/elf.h
231  */
232 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
233 {
234     (*regs)[0] = env->regs[R_EBX];
235     (*regs)[1] = env->regs[R_ECX];
236     (*regs)[2] = env->regs[R_EDX];
237     (*regs)[3] = env->regs[R_ESI];
238     (*regs)[4] = env->regs[R_EDI];
239     (*regs)[5] = env->regs[R_EBP];
240     (*regs)[6] = env->regs[R_EAX];
241     (*regs)[7] = env->segs[R_DS].selector & 0xffff;
242     (*regs)[8] = env->segs[R_ES].selector & 0xffff;
243     (*regs)[9] = env->segs[R_FS].selector & 0xffff;
244     (*regs)[10] = env->segs[R_GS].selector & 0xffff;
245     (*regs)[11] = env->regs[R_EAX]; /* XXX */
246     (*regs)[12] = env->eip;
247     (*regs)[13] = env->segs[R_CS].selector & 0xffff;
248     (*regs)[14] = env->eflags;
249     (*regs)[15] = env->regs[R_ESP];
250     (*regs)[16] = env->segs[R_SS].selector & 0xffff;
251 }
252 #endif
253 
254 #define USE_ELF_CORE_DUMP
255 #define ELF_EXEC_PAGESIZE       4096
256 
257 #endif
258 
259 #ifdef TARGET_ARM
260 
261 #define ELF_START_MMAP 0x80000000
262 
263 #define elf_check_arch(x) ( (x) == EM_ARM )
264 
265 #define ELF_CLASS       ELFCLASS32
266 #define ELF_ARCH        EM_ARM
267 
268 static inline void init_thread(struct target_pt_regs *regs,
269                                struct image_info *infop)
270 {
271     abi_long stack = infop->start_stack;
272     memset(regs, 0, sizeof(*regs));
273     regs->ARM_cpsr = 0x10;
274     if (infop->entry & 1)
275         regs->ARM_cpsr |= CPSR_T;
276     regs->ARM_pc = infop->entry & 0xfffffffe;
277     regs->ARM_sp = infop->start_stack;
278     /* FIXME - what to do for failure of get_user()? */
279     get_user_ual(regs->ARM_r2, stack + 8); /* envp */
280     get_user_ual(regs->ARM_r1, stack + 4); /* argv */
281     /* XXX: it seems that r0 is zeroed after ! */
282     regs->ARM_r0 = 0;
283     /* For uClinux PIC binaries.  */
284     /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
285     regs->ARM_r10 = infop->start_data;
286 }
287 
288 #define ELF_NREG    18
289 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
290 
291 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
292 {
293     (*regs)[0] = tswapl(env->regs[0]);
294     (*regs)[1] = tswapl(env->regs[1]);
295     (*regs)[2] = tswapl(env->regs[2]);
296     (*regs)[3] = tswapl(env->regs[3]);
297     (*regs)[4] = tswapl(env->regs[4]);
298     (*regs)[5] = tswapl(env->regs[5]);
299     (*regs)[6] = tswapl(env->regs[6]);
300     (*regs)[7] = tswapl(env->regs[7]);
301     (*regs)[8] = tswapl(env->regs[8]);
302     (*regs)[9] = tswapl(env->regs[9]);
303     (*regs)[10] = tswapl(env->regs[10]);
304     (*regs)[11] = tswapl(env->regs[11]);
305     (*regs)[12] = tswapl(env->regs[12]);
306     (*regs)[13] = tswapl(env->regs[13]);
307     (*regs)[14] = tswapl(env->regs[14]);
308     (*regs)[15] = tswapl(env->regs[15]);
309 
310     (*regs)[16] = tswapl(cpsr_read((CPUState *)env));
311     (*regs)[17] = tswapl(env->regs[0]); /* XXX */
312 }
313 
314 #define USE_ELF_CORE_DUMP
315 #define ELF_EXEC_PAGESIZE       4096
316 
317 enum
318 {
319     ARM_HWCAP_ARM_SWP       = 1 << 0,
320     ARM_HWCAP_ARM_HALF      = 1 << 1,
321     ARM_HWCAP_ARM_THUMB     = 1 << 2,
322     ARM_HWCAP_ARM_26BIT     = 1 << 3,
323     ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
324     ARM_HWCAP_ARM_FPA       = 1 << 5,
325     ARM_HWCAP_ARM_VFP       = 1 << 6,
326     ARM_HWCAP_ARM_EDSP      = 1 << 7,
327     ARM_HWCAP_ARM_JAVA      = 1 << 8,
328     ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
329     ARM_HWCAP_ARM_THUMBEE   = 1 << 10,
330     ARM_HWCAP_ARM_NEON      = 1 << 11,
331     ARM_HWCAP_ARM_VFPv3     = 1 << 12,
332     ARM_HWCAP_ARM_VFPv3D16  = 1 << 13,
333 };
334 
335 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF               \
336                    | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT      \
337                    | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP              \
338                    | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
339 
340 #endif
341 
342 #ifdef TARGET_SPARC
343 #ifdef TARGET_SPARC64
344 
345 #define ELF_START_MMAP 0x80000000
346 
347 #ifndef TARGET_ABI32
348 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
349 #else
350 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
351 #endif
352 
353 #define ELF_CLASS   ELFCLASS64
354 #define ELF_ARCH    EM_SPARCV9
355 
356 #define STACK_BIAS              2047
357 
358 static inline void init_thread(struct target_pt_regs *regs,
359                                struct image_info *infop)
360 {
361 #ifndef TARGET_ABI32
362     regs->tstate = 0;
363 #endif
364     regs->pc = infop->entry;
365     regs->npc = regs->pc + 4;
366     regs->y = 0;
367 #ifdef TARGET_ABI32
368     regs->u_regs[14] = infop->start_stack - 16 * 4;
369 #else
370     if (personality(infop->personality) == PER_LINUX32)
371         regs->u_regs[14] = infop->start_stack - 16 * 4;
372     else
373         regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
374 #endif
375 }
376 
377 #else
378 #define ELF_START_MMAP 0x80000000
379 
380 #define elf_check_arch(x) ( (x) == EM_SPARC )
381 
382 #define ELF_CLASS   ELFCLASS32
383 #define ELF_ARCH    EM_SPARC
384 
385 static inline void init_thread(struct target_pt_regs *regs,
386                                struct image_info *infop)
387 {
388     regs->psr = 0;
389     regs->pc = infop->entry;
390     regs->npc = regs->pc + 4;
391     regs->y = 0;
392     regs->u_regs[14] = infop->start_stack - 16 * 4;
393 }
394 
395 #endif
396 #endif
397 
398 #ifdef TARGET_PPC
399 
400 #define ELF_START_MMAP 0x80000000
401 
402 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
403 
404 #define elf_check_arch(x) ( (x) == EM_PPC64 )
405 
406 #define ELF_CLASS       ELFCLASS64
407 
408 #else
409 
410 #define elf_check_arch(x) ( (x) == EM_PPC )
411 
412 #define ELF_CLASS       ELFCLASS32
413 
414 #endif
415 
416 #define ELF_ARCH        EM_PPC
417 
418 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
419    See arch/powerpc/include/asm/cputable.h.  */
420 enum {
421     QEMU_PPC_FEATURE_32 = 0x80000000,
422     QEMU_PPC_FEATURE_64 = 0x40000000,
423     QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
424     QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
425     QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
426     QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
427     QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
428     QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
429     QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
430     QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
431     QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
432     QEMU_PPC_FEATURE_NO_TB = 0x00100000,
433     QEMU_PPC_FEATURE_POWER4 = 0x00080000,
434     QEMU_PPC_FEATURE_POWER5 = 0x00040000,
435     QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
436     QEMU_PPC_FEATURE_CELL = 0x00010000,
437     QEMU_PPC_FEATURE_BOOKE = 0x00008000,
438     QEMU_PPC_FEATURE_SMT = 0x00004000,
439     QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
440     QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
441     QEMU_PPC_FEATURE_PA6T = 0x00000800,
442     QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
443     QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
444     QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
445     QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
446     QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
447 
448     QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
449     QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
450 };
451 
452 #define ELF_HWCAP get_elf_hwcap()
453 
454 static uint32_t get_elf_hwcap(void)
455 {
456     CPUState *e = thread_env;
457     uint32_t features = 0;
458 
459     /* We don't have to be terribly complete here; the high points are
460        Altivec/FP/SPE support.  Anything else is just a bonus.  */
461 #define GET_FEATURE(flag, feature)                                      \
462     do {if (e->insns_flags & flag) features |= feature; } while(0)
463     GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
464     GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
465     GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
466     GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
467     GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
468     GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
469     GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
470     GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
471 #undef GET_FEATURE
472 
473     return features;
474 }
475 
476 /*
477  * The requirements here are:
478  * - keep the final alignment of sp (sp & 0xf)
479  * - make sure the 32-bit value at the first 16 byte aligned position of
480  *   AUXV is greater than 16 for glibc compatibility.
481  *   AT_IGNOREPPC is used for that.
482  * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
483  *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
484  */
485 #define DLINFO_ARCH_ITEMS       5
486 #define ARCH_DLINFO                                     \
487     do {                                                \
488         NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
489         NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
490         NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
491         /*                                              \
492          * Now handle glibc compatibility.              \
493          */                                             \
494         NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
495         NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
496     } while (0)
497 
498 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
499 {
500     _regs->gpr[1] = infop->start_stack;
501 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
502     _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_addr;
503     infop->entry = ldq_raw(infop->entry) + infop->load_addr;
504 #endif
505     _regs->nip = infop->entry;
506 }
507 
508 /* See linux kernel: arch/powerpc/include/asm/elf.h.  */
509 #define ELF_NREG 48
510 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
511 
512 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
513 {
514     int i;
515     target_ulong ccr = 0;
516 
517     for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
518         (*regs)[i] = tswapl(env->gpr[i]);
519     }
520 
521     (*regs)[32] = tswapl(env->nip);
522     (*regs)[33] = tswapl(env->msr);
523     (*regs)[35] = tswapl(env->ctr);
524     (*regs)[36] = tswapl(env->lr);
525     (*regs)[37] = tswapl(env->xer);
526 
527     for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
528         ccr |= env->crf[i] << (32 - ((i + 1) * 4));
529     }
530     (*regs)[38] = tswapl(ccr);
531 }
532 
533 #define USE_ELF_CORE_DUMP
534 #define ELF_EXEC_PAGESIZE       4096
535 
536 #endif
537 
538 #ifdef TARGET_MIPS
539 
540 #define ELF_START_MMAP 0x80000000
541 
542 #define elf_check_arch(x) ( (x) == EM_MIPS )
543 
544 #ifdef TARGET_MIPS64
545 #define ELF_CLASS   ELFCLASS64
546 #else
547 #define ELF_CLASS   ELFCLASS32
548 #endif
549 #define ELF_ARCH    EM_MIPS
550 
551 static inline void init_thread(struct target_pt_regs *regs,
552                                struct image_info *infop)
553 {
554     regs->cp0_status = 2 << CP0St_KSU;
555     regs->cp0_epc = infop->entry;
556     regs->regs[29] = infop->start_stack;
557 }
558 
559 /* See linux kernel: arch/mips/include/asm/elf.h.  */
560 #define ELF_NREG 45
561 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
562 
563 /* See linux kernel: arch/mips/include/asm/reg.h.  */
564 enum {
565 #ifdef TARGET_MIPS64
566     TARGET_EF_R0 = 0,
567 #else
568     TARGET_EF_R0 = 6,
569 #endif
570     TARGET_EF_R26 = TARGET_EF_R0 + 26,
571     TARGET_EF_R27 = TARGET_EF_R0 + 27,
572     TARGET_EF_LO = TARGET_EF_R0 + 32,
573     TARGET_EF_HI = TARGET_EF_R0 + 33,
574     TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
575     TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
576     TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
577     TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
578 };
579 
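/* For the 32-bit ABI the general registers start at index 6, so the first
   six slots of the dump are padding; elf_core_copy_regs() below zeroes
   them explicitly before filling in the GPRs. */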
580 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
581 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
582 {
583     int i;
584 
585     for (i = 0; i < TARGET_EF_R0; i++) {
586         (*regs)[i] = 0;
587     }
588     (*regs)[TARGET_EF_R0] = 0;
589 
590     for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
591         (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
592     }
593 
594     (*regs)[TARGET_EF_R26] = 0;
595     (*regs)[TARGET_EF_R27] = 0;
596     (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
597     (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
598     (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
599     (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
600     (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
601     (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
602 }
603 
604 #define USE_ELF_CORE_DUMP
605 #define ELF_EXEC_PAGESIZE        4096
606 
607 #endif /* TARGET_MIPS */
608 
609 #ifdef TARGET_MICROBLAZE
610 
611 #define ELF_START_MMAP 0x80000000
612 
613 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
614 
615 #define ELF_CLASS   ELFCLASS32
616 #define ELF_ARCH    EM_MICROBLAZE
617 
618 static inline void init_thread(struct target_pt_regs *regs,
619                                struct image_info *infop)
620 {
621     regs->pc = infop->entry;
622     regs->r1 = infop->start_stack;
623 
624 }
625 
626 #define ELF_EXEC_PAGESIZE        4096
627 
628 #define USE_ELF_CORE_DUMP
629 #define ELF_NREG 38
630 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
631 
632 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
633 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
634 {
635     int i, pos = 0;
636 
637     for (i = 0; i < 32; i++) {
638         (*regs)[pos++] = tswapl(env->regs[i]);
639     }
640 
641     for (i = 0; i < 6; i++) {
642         (*regs)[pos++] = tswapl(env->sregs[i]);
643     }
644 }
645 
646 #endif /* TARGET_MICROBLAZE */
647 
648 #ifdef TARGET_SH4
649 
650 #define ELF_START_MMAP 0x80000000
651 
652 #define elf_check_arch(x) ( (x) == EM_SH )
653 
654 #define ELF_CLASS ELFCLASS32
655 #define ELF_ARCH  EM_SH
656 
657 static inline void init_thread(struct target_pt_regs *regs,
658                                struct image_info *infop)
659 {
660     /* Check other registers XXXXX */
661     regs->pc = infop->entry;
662     regs->regs[15] = infop->start_stack;
663 }
664 
665 /* See linux kernel: arch/sh/include/asm/elf.h.  */
666 #define ELF_NREG 23
667 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
668 
669 /* See linux kernel: arch/sh/include/asm/ptrace.h.  */
670 enum {
671     TARGET_REG_PC = 16,
672     TARGET_REG_PR = 17,
673     TARGET_REG_SR = 18,
674     TARGET_REG_GBR = 19,
675     TARGET_REG_MACH = 20,
676     TARGET_REG_MACL = 21,
677     TARGET_REG_SYSCALL = 22
678 };
679 
680 static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
681                                       const CPUState *env)
682 {
683     int i;
684 
685     for (i = 0; i < 16; i++) {
686         (*regs)[i] = tswapl(env->gregs[i]);
687     }
688 
689     (*regs)[TARGET_REG_PC] = tswapl(env->pc);
690     (*regs)[TARGET_REG_PR] = tswapl(env->pr);
691     (*regs)[TARGET_REG_SR] = tswapl(env->sr);
692     (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
693     (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
694     (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
695     (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
696 }
697 
698 #define USE_ELF_CORE_DUMP
699 #define ELF_EXEC_PAGESIZE        4096
700 
701 #endif
702 
703 #ifdef TARGET_CRIS
704 
705 #define ELF_START_MMAP 0x80000000
706 
707 #define elf_check_arch(x) ( (x) == EM_CRIS )
708 
709 #define ELF_CLASS ELFCLASS32
710 #define ELF_ARCH  EM_CRIS
711 
712 static inline void init_thread(struct target_pt_regs *regs,
713                                struct image_info *infop)
714 {
715     regs->erp = infop->entry;
716 }
717 
718 #define ELF_EXEC_PAGESIZE        8192
719 
720 #endif
721 
722 #ifdef TARGET_M68K
723 
724 #define ELF_START_MMAP 0x80000000
725 
726 #define elf_check_arch(x) ( (x) == EM_68K )
727 
728 #define ELF_CLASS       ELFCLASS32
729 #define ELF_ARCH        EM_68K
730 
731 /* ??? Does this need to do anything?
732    #define ELF_PLAT_INIT(_r) */
733 
734 static inline void init_thread(struct target_pt_regs *regs,
735                                struct image_info *infop)
736 {
737     regs->usp = infop->start_stack;
738     regs->sr = 0;
739     regs->pc = infop->entry;
740 }
741 
742 /* See linux kernel: arch/m68k/include/asm/elf.h.  */
743 #define ELF_NREG 20
744 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
745 
746 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
747 {
748     (*regs)[0] = tswapl(env->dregs[1]);
749     (*regs)[1] = tswapl(env->dregs[2]);
750     (*regs)[2] = tswapl(env->dregs[3]);
751     (*regs)[3] = tswapl(env->dregs[4]);
752     (*regs)[4] = tswapl(env->dregs[5]);
753     (*regs)[5] = tswapl(env->dregs[6]);
754     (*regs)[6] = tswapl(env->dregs[7]);
755     (*regs)[7] = tswapl(env->aregs[0]);
756     (*regs)[8] = tswapl(env->aregs[1]);
757     (*regs)[9] = tswapl(env->aregs[2]);
758     (*regs)[10] = tswapl(env->aregs[3]);
759     (*regs)[11] = tswapl(env->aregs[4]);
760     (*regs)[12] = tswapl(env->aregs[5]);
761     (*regs)[13] = tswapl(env->aregs[6]);
762     (*regs)[14] = tswapl(env->dregs[0]);
763     (*regs)[15] = tswapl(env->aregs[7]);
764     (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
765     (*regs)[17] = tswapl(env->sr);
766     (*regs)[18] = tswapl(env->pc);
767     (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
768 }
769 
770 #define USE_ELF_CORE_DUMP
771 #define ELF_EXEC_PAGESIZE       8192
772 
773 #endif
774 
775 #ifdef TARGET_ALPHA
776 
777 #define ELF_START_MMAP (0x30000000000ULL)
778 
779 #define elf_check_arch(x) ( (x) == ELF_ARCH )
780 
781 #define ELF_CLASS      ELFCLASS64
782 #define ELF_ARCH       EM_ALPHA
783 
784 static inline void init_thread(struct target_pt_regs *regs,
785                                struct image_info *infop)
786 {
787     regs->pc = infop->entry;
788     regs->ps = 8;
789     regs->usp = infop->start_stack;
790 }
791 
792 #define ELF_EXEC_PAGESIZE        8192
793 
794 #endif /* TARGET_ALPHA */
795 
796 #ifndef ELF_PLATFORM
797 #define ELF_PLATFORM (NULL)
798 #endif
799 
800 #ifndef ELF_HWCAP
801 #define ELF_HWCAP 0
802 #endif
803 
804 #ifdef TARGET_ABI32
805 #undef ELF_CLASS
806 #define ELF_CLASS ELFCLASS32
807 #undef bswaptls
808 #define bswaptls(ptr) bswap32s(ptr)
809 #endif
810 
811 #include "elf.h"
812 
813 struct exec
814 {
815     unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
816     unsigned int a_text;   /* length of text, in bytes */
817     unsigned int a_data;   /* length of data, in bytes */
818     unsigned int a_bss;    /* length of uninitialized data area, in bytes */
819     unsigned int a_syms;   /* length of symbol table data in file, in bytes */
820     unsigned int a_entry;  /* start address */
821     unsigned int a_trsize; /* length of relocation info for text, in bytes */
822     unsigned int a_drsize; /* length of relocation info for data, in bytes */
823 };
824 
825 
826 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
827 #define OMAGIC 0407
828 #define NMAGIC 0410
829 #define ZMAGIC 0413
830 #define QMAGIC 0314
831 
832 /* Necessary parameters */
833 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
834 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
835 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
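/* Illustrative example, assuming a 4 KiB TARGET_ELF_EXEC_PAGESIZE: a
   p_vaddr of 0x0804a123 gives TARGET_ELF_PAGESTART == 0x0804a000 and
   TARGET_ELF_PAGEOFFSET == 0x123; the file mappings made later are widened
   downward by that offset so they remain page aligned. */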
836 
837 #define DLINFO_ITEMS 12
838 
839 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
840 {
841     memcpy(to, from, n);
842 }
843 
844 #ifdef BSWAP_NEEDED
845 static void bswap_ehdr(struct elfhdr *ehdr)
846 {
847     bswap16s(&ehdr->e_type);            /* Object file type */
848     bswap16s(&ehdr->e_machine);         /* Architecture */
849     bswap32s(&ehdr->e_version);         /* Object file version */
850     bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
851     bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
852     bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
853     bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
854     bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
855     bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
856     bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
857     bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
858     bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
859     bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
860 }
861 
862 static void bswap_phdr(struct elf_phdr *phdr, int phnum)
863 {
864     int i;
865     for (i = 0; i < phnum; ++i, ++phdr) {
866         bswap32s(&phdr->p_type);        /* Segment type */
867         bswap32s(&phdr->p_flags);       /* Segment flags */
868         bswaptls(&phdr->p_offset);      /* Segment file offset */
869         bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
870         bswaptls(&phdr->p_paddr);       /* Segment physical address */
871         bswaptls(&phdr->p_filesz);      /* Segment size in file */
872         bswaptls(&phdr->p_memsz);       /* Segment size in memory */
873         bswaptls(&phdr->p_align);       /* Segment alignment */
874     }
875 }
876 
877 static void bswap_shdr(struct elf_shdr *shdr, int shnum)
878 {
879     int i;
880     for (i = 0; i < shnum; ++i, ++shdr) {
881         bswap32s(&shdr->sh_name);
882         bswap32s(&shdr->sh_type);
883         bswaptls(&shdr->sh_flags);
884         bswaptls(&shdr->sh_addr);
885         bswaptls(&shdr->sh_offset);
886         bswaptls(&shdr->sh_size);
887         bswap32s(&shdr->sh_link);
888         bswap32s(&shdr->sh_info);
889         bswaptls(&shdr->sh_addralign);
890         bswaptls(&shdr->sh_entsize);
891     }
892 }
893 
894 static void bswap_sym(struct elf_sym *sym)
895 {
896     bswap32s(&sym->st_name);
897     bswaptls(&sym->st_value);
898     bswaptls(&sym->st_size);
899     bswap16s(&sym->st_shndx);
900 }
901 #else
902 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
903 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
904 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
905 static inline void bswap_sym(struct elf_sym *sym) { }
906 #endif
907 
908 #ifdef USE_ELF_CORE_DUMP
909 static int elf_core_dump(int, const CPUState *);
910 #endif /* USE_ELF_CORE_DUMP */
911 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
912 
913 /* Verify the portions of EHDR within E_IDENT for the target.
914    This can be performed before bswapping the entire header.  */
915 static bool elf_check_ident(struct elfhdr *ehdr)
916 {
917     return (ehdr->e_ident[EI_MAG0] == ELFMAG0
918             && ehdr->e_ident[EI_MAG1] == ELFMAG1
919             && ehdr->e_ident[EI_MAG2] == ELFMAG2
920             && ehdr->e_ident[EI_MAG3] == ELFMAG3
921             && ehdr->e_ident[EI_CLASS] == ELF_CLASS
922             && ehdr->e_ident[EI_DATA] == ELF_DATA
923             && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
924 }
925 
926 /* Verify the portions of EHDR outside of E_IDENT for the target.
927    This has to wait until after bswapping the header.  */
928 static bool elf_check_ehdr(struct elfhdr *ehdr)
929 {
930     return (elf_check_arch(ehdr->e_machine)
931             && ehdr->e_ehsize == sizeof(struct elfhdr)
932             && ehdr->e_phentsize == sizeof(struct elf_phdr)
933             && ehdr->e_shentsize == sizeof(struct elf_shdr)
934             && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
935 }
936 
937 /*
938  * 'copy_elf_strings()' copies argument/environment strings from user
939  * memory to free pages in kernel mem. These are in a format ready
940  * to be put directly into the top of new user memory.
941  *
942  */
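/* Strings are copied last-argument-first, filling the temporary arg pages
   downward from the current value of p; pages are allocated on demand and
   the new (lower) value of p is returned. */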
943 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
944                                   abi_ulong p)
945 {
946     char *tmp, *tmp1, *pag = NULL;
947     int len, offset = 0;
948 
949     if (!p) {
950         return 0;       /* bullet-proofing */
951     }
952     while (argc-- > 0) {
953         tmp = argv[argc];
954         if (!tmp) {
955             fprintf(stderr, "VFS: argc is wrong");
956             exit(-1);
957         }
958         tmp1 = tmp;
959         while (*tmp++);
960         len = tmp - tmp1;
961         if (p < len) {  /* this shouldn't happen - 128kB */
962             return 0;
963         }
964         while (len) {
965             --p; --tmp; --len;
966             if (--offset < 0) {
967                 offset = p % TARGET_PAGE_SIZE;
968                 pag = (char *)page[p/TARGET_PAGE_SIZE];
969                 if (!pag) {
970                     pag = (char *)malloc(TARGET_PAGE_SIZE);
971                     if (!pag)
972                         return 0;
973                     memset(pag, 0, TARGET_PAGE_SIZE);
974                     page[p/TARGET_PAGE_SIZE] = pag;
975                 }
976             }
977             if (len == 0 || offset == 0) {
978                 *(pag + offset) = *tmp;
979             }
980             else {
981                 int bytes_to_copy = (len > offset) ? offset : len;
982                 tmp -= bytes_to_copy;
983                 p -= bytes_to_copy;
984                 offset -= bytes_to_copy;
985                 len -= bytes_to_copy;
986                 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
987             }
988         }
989     }
990     return p;
991 }
992 
993 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
994                                  struct image_info *info)
995 {
996     abi_ulong stack_base, size, error, guard;
997     int i;
998 
999     /* Create enough stack to hold everything.  If we don't use
1000        it for args, we'll use it for something else.  */
1001     size = guest_stack_size;
1002     if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) {
1003         size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1004     }
1005     guard = TARGET_PAGE_SIZE;
1006     if (guard < qemu_real_host_page_size) {
1007         guard = qemu_real_host_page_size;
1008     }
1009 
1010     error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
1011                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1012     if (error == -1) {
1013         perror("mmap stack");
1014         exit(-1);
1015     }
1016 
1017     /* We reserve one extra page at the top of the stack as guard.  */
1018     target_mprotect(error, guard, PROT_NONE);
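    /* The guard occupies the lowest addresses of the mapping; stack_limit,
       set just above it, is the lowest address the guest stack may reach. */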
1019 
1020     info->stack_limit = error + guard;
1021     stack_base = info->stack_limit + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1022     p += stack_base;
1023 
1024     for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
1025         if (bprm->page[i]) {
1026             info->rss++;
1027             /* FIXME - check return value of memcpy_to_target() for failure */
1028             memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
1029             free(bprm->page[i]);
1030         }
1031         stack_base += TARGET_PAGE_SIZE;
1032     }
1033     return p;
1034 }
1035 
1036 /* Map and zero the bss.  We need to explicitly zero any fractional pages
1037    after the data section (i.e. bss).  */
1038 static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
1039 {
1040     uintptr_t host_start, host_map_start, host_end;
1041 
1042     last_bss = TARGET_PAGE_ALIGN(last_bss);
1043 
1044     /* ??? There is confusion between qemu_real_host_page_size and
1045        qemu_host_page_size here and elsewhere in target_mmap, which
1046        may lead to the end of the data section mapping from the file
1047        not being mapped.  At least there was an explicit test and
1048        comment for that here, suggesting that "the file size must
1049        be known".  The comment probably pre-dates the introduction
1050        of the fstat system call in target_mmap which does in fact
1051        find out the size.  What isn't clear is if the workaround
1052        here is still actually needed.  For now, continue with it,
1053        but merge it with the "normal" mmap that would allocate the bss.  */
1054 
1055     host_start = (uintptr_t) g2h(elf_bss);
1056     host_end = (uintptr_t) g2h(last_bss);
1057     host_map_start = (host_start + qemu_real_host_page_size - 1);
1058     host_map_start &= -qemu_real_host_page_size;
1059 
1060     if (host_map_start < host_end) {
1061         void *p = mmap((void *)host_map_start, host_end - host_map_start,
1062                        prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1063         if (p == MAP_FAILED) {
1064             perror("cannot mmap brk");
1065             exit(-1);
1066         }
1067 
1068         /* Since we didn't use target_mmap, make sure to record
1069            the validity of the pages with qemu.  */
1070         page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID);
1071     }
1072 
1073     if (host_start < host_map_start) {
1074         memset((void *)host_start, 0, host_map_start - host_start);
1075     }
1076 }
1077 
1078 #ifdef CONFIG_USE_FDPIC
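/* Push an elf32_fdpic_loadmap onto the guest stack: per-segment triples
   (addr, p_vaddr, p_memsz) are written from the last segment downward,
   followed by the two 16-bit header fields (version, nsegs); the returned
   sp points at the header. */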
1079 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
1080 {
1081     uint16_t n;
1082     struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
1083 
1084     /* elf32_fdpic_loadseg */
1085     n = info->nsegs;
1086     while (n--) {
1087         sp -= 12;
1088         put_user_u32(loadsegs[n].addr, sp+0);
1089         put_user_u32(loadsegs[n].p_vaddr, sp+4);
1090         put_user_u32(loadsegs[n].p_memsz, sp+8);
1091     }
1092 
1093     /* elf32_fdpic_loadmap */
1094     sp -= 4;
1095     put_user_u16(0, sp+0); /* version */
1096     put_user_u16(info->nsegs, sp+2); /* nsegs */
1097 
1098     info->personality = PER_LINUX_FDPIC;
1099     info->loadmap_addr = sp;
1100 
1101     return sp;
1102 }
1103 #endif
1104 
1105 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1106                                    struct elfhdr *exec,
1107                                    struct image_info *info,
1108                                    struct image_info *interp_info)
1109 {
1110     abi_ulong sp;
1111     int size;
1112     abi_ulong u_platform;
1113     const char *k_platform;
1114     const int n = sizeof(elf_addr_t);
1115 
1116     sp = p;
1117 
1118 #ifdef CONFIG_USE_FDPIC
1119     /* Needs to be before we load the env/argc/... */
1120     if (elf_is_fdpic(exec)) {
1121         /* Need 4 byte alignment for these structs */
1122         sp &= ~3;
1123         sp = loader_build_fdpic_loadmap(info, sp);
1124         info->other_info = interp_info;
1125         if (interp_info) {
1126             interp_info->other_info = info;
1127             sp = loader_build_fdpic_loadmap(interp_info, sp);
1128         }
1129     }
1130 #endif
1131 
1132     u_platform = 0;
1133     k_platform = ELF_PLATFORM;
1134     if (k_platform) {
1135         size_t len = strlen(k_platform) + 1;
1136         sp -= (len + n - 1) & ~(n - 1);
1137         u_platform = sp;
1138         /* FIXME - check return value of memcpy_to_target() for failure */
1139         memcpy_to_target(sp, k_platform, len);
1140     }
1141     /*
1142      * Force 16 byte _final_ alignment here for generality.
1143      */
1144     sp = sp &~ (abi_ulong)15;
1145     size = (DLINFO_ITEMS + 1) * 2;
1146     if (k_platform)
1147         size += 2;
1148 #ifdef DLINFO_ARCH_ITEMS
1149     size += DLINFO_ARCH_ITEMS * 2;
1150 #endif
1151     size += envc + argc + 2;
1152     size += 1;  /* argc itself */
1153     size *= n;
1154     if (size & 15)
1155         sp -= 16 - (size & 15);
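    /* "size" counts every abi_ulong slot pushed from here on (the auxv
       pairs, the optional platform pointer, argc and the argv/envp pointer
       arrays), so rounding sp down by the remainder keeps the final stack
       pointer 16-byte aligned once everything has been pushed. */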
1156 
1157     /* This is correct because Linux defines
1158      * elf_addr_t as Elf32_Off / Elf64_Off
1159      */
1160 #define NEW_AUX_ENT(id, val) do {               \
1161         sp -= n; put_user_ual(val, sp);         \
1162         sp -= n; put_user_ual(id, sp);          \
1163     } while(0)
1164 
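/* Each NEW_AUX_ENT pushes the value and then the id, moving sp downward,
   so later entries land at lower addresses; the AT_NULL terminator pushed
   first therefore ends up as the last entry when the guest walks the
   vector upward from sp. */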
1165     NEW_AUX_ENT (AT_NULL, 0);
1166 
1167     /* There must be exactly DLINFO_ITEMS entries here.  */
1168     NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
1169     NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1170     NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1171     NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1172     NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
1173     NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1174     NEW_AUX_ENT(AT_ENTRY, info->entry);
1175     NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1176     NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1177     NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1178     NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1179     NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1180     NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1181     if (k_platform)
1182         NEW_AUX_ENT(AT_PLATFORM, u_platform);
1183 #ifdef ARCH_DLINFO
1184     /*
1185      * ARCH_DLINFO must come last so platform specific code can enforce
1186      * special alignment requirements on the AUXV if necessary (eg. PPC).
1187      */
1188     ARCH_DLINFO;
1189 #endif
1190 #undef NEW_AUX_ENT
1191 
1192     info->saved_auxv = sp;
1193 
1194     sp = loader_build_argptr(envc, argc, sp, p, 0);
1195     return sp;
1196 }
1197 
1198 /* Load an ELF image into the address space.
1199 
1200    IMAGE_NAME is the filename of the image, to use in error messages.
1201    IMAGE_FD is the open file descriptor for the image.
1202 
1203    BPRM_BUF is a copy of the beginning of the file; this of course
1204    contains the elf file header at offset 0.  It is assumed that this
1205    buffer is sufficiently aligned to present no problems to the host
1206    in accessing data at aligned offsets within the buffer.
1207 
1208    On return: INFO values will be filled in, as necessary or available.  */
1209 
1210 static void load_elf_image(const char *image_name, int image_fd,
1211                            struct image_info *info, char **pinterp_name,
1212                            char bprm_buf[BPRM_BUF_SIZE])
1213 {
1214     struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
1215     struct elf_phdr *phdr;
1216     abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
1217     int i, retval;
1218     const char *errmsg;
1219 
1220     /* First of all, some simple consistency checks */
1221     errmsg = "Invalid ELF image for this architecture";
1222     if (!elf_check_ident(ehdr)) {
1223         goto exit_errmsg;
1224     }
1225     bswap_ehdr(ehdr);
1226     if (!elf_check_ehdr(ehdr)) {
1227         goto exit_errmsg;
1228     }
1229 
1230     i = ehdr->e_phnum * sizeof(struct elf_phdr);
1231     if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
1232         phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
1233     } else {
1234         phdr = (struct elf_phdr *) alloca(i);
1235         retval = pread(image_fd, phdr, i, ehdr->e_phoff);
1236         if (retval != i) {
1237             goto exit_read;
1238         }
1239     }
1240     bswap_phdr(phdr, ehdr->e_phnum);
1241 
1242 #ifdef CONFIG_USE_FDPIC
1243     info->nsegs = 0;
1244     info->pt_dynamic_addr = 0;
1245 #endif
1246 
1247     /* Find the maximum size of the image and allocate an appropriate
1248        amount of memory to handle that.  */
1249     loaddr = -1, hiaddr = 0;
1250     for (i = 0; i < ehdr->e_phnum; ++i) {
1251         if (phdr[i].p_type == PT_LOAD) {
1252             abi_ulong a = phdr[i].p_vaddr;
1253             if (a < loaddr) {
1254                 loaddr = a;
1255             }
1256             a += phdr[i].p_memsz;
1257             if (a > hiaddr) {
1258                 hiaddr = a;
1259             }
1260 #ifdef CONFIG_USE_FDPIC
1261             ++info->nsegs;
1262 #endif
1263         }
1264     }
1265 
1266     load_addr = loaddr;
1267     if (ehdr->e_type == ET_DYN) {
1268         /* The image indicates that it can be loaded anywhere.  Find a
1269            location that can hold the memory space required.  If the
1270            image is pre-linked, LOADDR will be non-zero.  Since we do
1271            not supply MAP_FIXED here we'll use that address if and
1272            only if it remains available.  */
1273         load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
1274                                 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
1275                                 -1, 0);
1276         if (load_addr == -1) {
1277             goto exit_perror;
1278         }
1279     } else if (pinterp_name != NULL) {
1280         /* This is the main executable.  Make sure that the low
1281            address does not conflict with MMAP_MIN_ADDR or the
1282            QEMU application itself.  */
1283 #if defined(CONFIG_USE_GUEST_BASE)
1284         /*
1285          * In case the user has not explicitly set the guest_base, we
1286          * probe here whether we should set it automatically.
1287          */
1288         if (!have_guest_base && !reserved_va) {
1289             unsigned long host_start, real_start, host_size;
1290 
1291             /* Round addresses to page boundaries.  */
1292             loaddr &= qemu_host_page_mask;
1293             hiaddr = HOST_PAGE_ALIGN(hiaddr);
1294 
1295             if (loaddr < mmap_min_addr) {
1296                 host_start = HOST_PAGE_ALIGN(mmap_min_addr);
1297             } else {
1298                 host_start = loaddr;
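                /* host_start is a host "unsigned long"; if abi_ulong is
                   wider than that, the assignment above may truncate,
                   which the check below detects. */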
1299                 if (host_start != loaddr) {
1300                     errmsg = "Address overflow loading ELF binary";
1301                     goto exit_errmsg;
1302                 }
1303             }
1304             host_size = hiaddr - loaddr;
1305             while (1) {
1306                 /* Do not use mmap_find_vma here because that is limited to the
1307                    guest address space.  We are going to make the
1308                    guest address space fit whatever we're given.  */
1309                 real_start = (unsigned long)
1310                     mmap((void *)host_start, host_size, PROT_NONE,
1311                          MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
1312                 if (real_start == (unsigned long)-1) {
1313                     goto exit_perror;
1314                 }
1315                 if (real_start == host_start) {
1316                     break;
1317                 }
1318                 /* That address didn't work.  Unmap and try a different one.
1319                    The address the host picked is typically right at
1320                    the top of the host address space and leaves the guest with
1321                    no usable address space.  Resort to a linear search.  We
1322                    already compensated for mmap_min_addr, so this should not
1323                    happen often.  Probably means we got unlucky and host
1324                    address space randomization put a shared library somewhere
1325                    inconvenient.  */
1326                 munmap((void *)real_start, host_size);
1327                 host_start += qemu_host_page_size;
1328                 if (host_start == loaddr) {
1329                     /* Theoretically possible if host doesn't have any suitably
1330                        aligned areas.  Normally the first mmap will fail.  */
1331                     errmsg = "Unable to find space for application";
1332                     goto exit_errmsg;
1333                 }
1334             }
1335             qemu_log("Relocating guest address space from 0x"
1336                      TARGET_ABI_FMT_lx " to 0x%lx\n", loaddr, real_start);
1337             guest_base = real_start - loaddr;
1338         }
1339 #endif
1340     }
1341     load_bias = load_addr - loaddr;
1342 
1343 #ifdef CONFIG_USE_FDPIC
1344     {
1345         struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
1346             qemu_malloc(sizeof(*loadsegs) * info->nsegs);
1347 
1348         for (i = 0; i < ehdr->e_phnum; ++i) {
1349             switch (phdr[i].p_type) {
1350             case PT_DYNAMIC:
1351                 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
1352                 break;
1353             case PT_LOAD:
1354                 loadsegs->addr = phdr[i].p_vaddr + load_bias;
1355                 loadsegs->p_vaddr = phdr[i].p_vaddr;
1356                 loadsegs->p_memsz = phdr[i].p_memsz;
1357                 ++loadsegs;
1358                 break;
1359             }
1360         }
1361     }
1362 #endif
1363 
1364     info->load_bias = load_bias;
1365     info->load_addr = load_addr;
1366     info->entry = ehdr->e_entry + load_bias;
1367     info->start_code = -1;
1368     info->end_code = 0;
1369     info->start_data = -1;
1370     info->end_data = 0;
1371     info->brk = 0;
1372 
1373     for (i = 0; i < ehdr->e_phnum; i++) {
1374         struct elf_phdr *eppnt = phdr + i;
1375         if (eppnt->p_type == PT_LOAD) {
1376             abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
1377             int elf_prot = 0;
1378 
1379             if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
1380             if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1381             if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1382 
1383             vaddr = load_bias + eppnt->p_vaddr;
1384             vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
1385             vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
1386 
1387             error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
1388                                 elf_prot, MAP_PRIVATE | MAP_FIXED,
1389                                 image_fd, eppnt->p_offset - vaddr_po);
1390             if (error == -1) {
1391                 goto exit_perror;
1392             }
1393 
1394             vaddr_ef = vaddr + eppnt->p_filesz;
1395             vaddr_em = vaddr + eppnt->p_memsz;
1396 
1397             /* If the load segment requests extra zeros (e.g. bss), map it.  */
1398             if (vaddr_ef < vaddr_em) {
1399                 zero_bss(vaddr_ef, vaddr_em, elf_prot);
1400             }
1401 
1402             /* Find the full program boundaries.  */
1403             if (elf_prot & PROT_EXEC) {
1404                 if (vaddr < info->start_code) {
1405                     info->start_code = vaddr;
1406                 }
1407                 if (vaddr_ef > info->end_code) {
1408                     info->end_code = vaddr_ef;
1409                 }
1410             }
1411             if (elf_prot & PROT_WRITE) {
1412                 if (vaddr < info->start_data) {
1413                     info->start_data = vaddr;
1414                 }
1415                 if (vaddr_ef > info->end_data) {
1416                     info->end_data = vaddr_ef;
1417                 }
1418                 if (vaddr_em > info->brk) {
1419                     info->brk = vaddr_em;
1420                 }
1421             }
1422         } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
1423             char *interp_name;
1424 
1425             if (*pinterp_name) {
1426                 errmsg = "Multiple PT_INTERP entries";
1427                 goto exit_errmsg;
1428             }
1429             interp_name = malloc(eppnt->p_filesz);
1430             if (!interp_name) {
1431                 goto exit_perror;
1432             }
1433 
1434             if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
1435                 memcpy(interp_name, bprm_buf + eppnt->p_offset,
1436                        eppnt->p_filesz);
1437             } else {
1438                 retval = pread(image_fd, interp_name, eppnt->p_filesz,
1439                                eppnt->p_offset);
1440                 if (retval != eppnt->p_filesz) {
1441                     goto exit_perror;
1442                 }
1443             }
1444             if (interp_name[eppnt->p_filesz - 1] != 0) {
1445                 errmsg = "Invalid PT_INTERP entry";
1446                 goto exit_errmsg;
1447             }
1448             *pinterp_name = interp_name;
1449         }
1450     }
1451 
1452     if (info->end_data == 0) {
1453         info->start_data = info->end_code;
1454         info->end_data = info->end_code;
1455         info->brk = info->end_code;
1456     }
1457 
1458     if (qemu_log_enabled()) {
1459         load_symbols(ehdr, image_fd, load_bias);
1460     }
1461 
1462     close(image_fd);
1463     return;
1464 
1465  exit_read:
1466     if (retval >= 0) {
1467         errmsg = "Incomplete read of file header";
1468         goto exit_errmsg;
1469     }
1470  exit_perror:
1471     errmsg = strerror(errno);
1472  exit_errmsg:
1473     fprintf(stderr, "%s: %s\n", image_name, errmsg);
1474     exit(-1);
1475 }
1476 
1477 static void load_elf_interp(const char *filename, struct image_info *info,
1478                             char bprm_buf[BPRM_BUF_SIZE])
1479 {
1480     int fd, retval;
1481 
1482     fd = open(path(filename), O_RDONLY);
1483     if (fd < 0) {
1484         goto exit_perror;
1485     }
1486 
1487     retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
1488     if (retval < 0) {
1489         goto exit_perror;
1490     }
1491     if (retval < BPRM_BUF_SIZE) {
1492         memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
1493     }
1494 
1495     load_elf_image(filename, fd, info, NULL, bprm_buf);
1496     return;
1497 
1498  exit_perror:
1499     fprintf(stderr, "%s: %s\n", filename, strerror(errno));
1500     exit(-1);
1501 }
1502 
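/* bsearch comparator for lookup_symbolxx: the probe key carries only an
   address in st_value, and a symbol matches when that address falls within
   [st_value, st_value + st_size). */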
1503 static int symfind(const void *s0, const void *s1)
1504 {
1505     struct elf_sym *key = (struct elf_sym *)s0;
1506     struct elf_sym *sym = (struct elf_sym *)s1;
1507     int result = 0;
1508     if (key->st_value < sym->st_value) {
1509         result = -1;
1510     } else if (key->st_value >= sym->st_value + sym->st_size) {
1511         result = 1;
1512     }
1513     return result;
1514 }
1515 
1516 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1517 {
1518 #if ELF_CLASS == ELFCLASS32
1519     struct elf_sym *syms = s->disas_symtab.elf32;
1520 #else
1521     struct elf_sym *syms = s->disas_symtab.elf64;
1522 #endif
1523 
1524     // binary search
1525     struct elf_sym key;
1526     struct elf_sym *sym;
1527 
1528     key.st_value = orig_addr;
1529 
1530     sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1531     if (sym != NULL) {
1532         return s->disas_strtab + sym->st_name;
1533     }
1534 
1535     return "";
1536 }
1537 
1538 /* FIXME: This should use elf_ops.h  */
1539 static int symcmp(const void *s0, const void *s1)
1540 {
1541     struct elf_sym *sym0 = (struct elf_sym *)s0;
1542     struct elf_sym *sym1 = (struct elf_sym *)s1;
1543     return (sym0->st_value < sym1->st_value)
1544         ? -1
1545         : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1546 }
1547 
1548 /* Best attempt to load symbols from this ELF object. */
1549 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
1550 {
1551     int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
1552     struct elf_shdr *shdr;
1553     char *strings;
1554     struct syminfo *s;
1555     struct elf_sym *syms, *new_syms;
1556 
1557     shnum = hdr->e_shnum;
1558     i = shnum * sizeof(struct elf_shdr);
1559     shdr = (struct elf_shdr *)alloca(i);
1560     if (pread(fd, shdr, i, hdr->e_shoff) != i) {
1561         return;
1562     }
1563 
1564     bswap_shdr(shdr, shnum);
1565     for (i = 0; i < shnum; ++i) {
1566         if (shdr[i].sh_type == SHT_SYMTAB) {
1567             sym_idx = i;
1568             str_idx = shdr[i].sh_link;
1569             goto found;
1570         }
1571     }
1572 
1573     /* There will be no symbol table if the file was stripped.  */
1574     return;
1575 
1576  found:
1577     /* Now know where the strtab and symtab are.  Snarf them.  */
1578     s = malloc(sizeof(*s));
1579     if (!s) {
1580         return;
1581     }
1582 
1583     i = shdr[str_idx].sh_size;
1584     s->disas_strtab = strings = malloc(i);
1585     if (!strings || pread(fd, strings, i, shdr[str_idx].sh_offset) != i) {
1586         free(s);
1587         free(strings);
1588         return;
1589     }
1590 
1591     i = shdr[sym_idx].sh_size;
1592     syms = malloc(i);
1593     if (!syms || pread(fd, syms, i, shdr[sym_idx].sh_offset) != i) {
1594         free(s);
1595         free(strings);
1596         free(syms);
1597         return;
1598     }
1599 
1600     nsyms = i / sizeof(struct elf_sym);
1601     for (i = 0; i < nsyms; ) {
1602         bswap_sym(syms + i);
1603         /* Throw away entries which we do not need.  */
1604         if (syms[i].st_shndx == SHN_UNDEF
1605             || syms[i].st_shndx >= SHN_LORESERVE
1606             || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1607             if (i < --nsyms) {
1608                 syms[i] = syms[nsyms];
1609             }
1610         } else {
1611 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1612             /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
1613             syms[i].st_value &= ~(target_ulong)1;
1614 #endif
1615             syms[i].st_value += load_bias;
1616             i++;
1617         }
1618     }
1619 
1620     /* Attempt to free the storage associated with the local symbols
1621        that we threw away.  Whether or not this has any effect on the
1622        memory allocation depends on the malloc implementation and how
1623        many symbols we managed to discard.  */
1624     new_syms = realloc(syms, nsyms * sizeof(*syms));
1625     if (new_syms == NULL) {
1626         free(s);
1627         free(syms);
1628         free(strings);
1629         return;
1630     }
1631     syms = new_syms;
1632 
1633     qsort(syms, nsyms, sizeof(*syms), symcmp);
1634 
1635     s->disas_num_syms = nsyms;
1636 #if ELF_CLASS == ELFCLASS32
1637     s->disas_symtab.elf32 = syms;
1638 #else
1639     s->disas_symtab.elf64 = syms;
1640 #endif
1641     s->lookup_symbol = lookup_symbolxx;
1642     s->next = syminfos;
1643     syminfos = s;
1644 }
1645 
1646 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1647                     struct image_info * info)
1648 {
1649     struct image_info interp_info;
1650     struct elfhdr elf_ex;
1651     char *elf_interpreter = NULL;
1652 
1653     info->start_mmap = (abi_ulong)ELF_START_MMAP;
1654     info->mmap = 0;
1655     info->rss = 0;
1656 
1657     load_elf_image(bprm->filename, bprm->fd, info,
1658                    &elf_interpreter, bprm->buf);
1659 
1660     /* ??? We need a copy of the elf header for passing to create_elf_tables.
1661        If we do nothing, we'll have overwritten this when we re-use bprm->buf
1662        when we load the interpreter.  */
1663     elf_ex = *(struct elfhdr *)bprm->buf;
1664 
1665     bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1666     bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
1667     bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
1668     if (!bprm->p) {
1669         fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
1670         exit(-1);
1671     }
1672 
1673     /* Do this so that we can load the interpreter, if need be.  We will
1674        change some of these later.  */
1675     bprm->p = setup_arg_pages(bprm->p, bprm, info);
1676 
1677     if (elf_interpreter) {
1678         load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
1679 
1680         /* If the program interpreter is one of these two, then assume
1681            an iBCS2 image.  Otherwise assume a native linux image.  */
1682 
1683         if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
1684             || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
1685             info->personality = PER_SVR4;
1686 
1687             /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
1688                and some applications "depend" upon this behavior.  Since
1689                we do not have the power to recompile these, we emulate
1690                the SVr4 behavior.  Sigh.  */
1691             target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1692                         MAP_FIXED | MAP_PRIVATE, -1, 0);
1693         }
1694     }
1695 
1696     bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
1697                                 info, (elf_interpreter ? &interp_info : NULL));
1698     info->start_stack = bprm->p;
1699 
1700     /* If we have an interpreter, set that as the program's entry point.
1701        Copy the load_addr as well, to help PPC64 interpret the entry
1702        point as a function descriptor.  Do this after creating elf tables
1703        so that we copy the original program entry point into the AUXV.  */
1704     if (elf_interpreter) {
1705         info->load_addr = interp_info.load_addr;
1706         info->entry = interp_info.entry;
1707         free(elf_interpreter);
1708     }
1709 
1710 #ifdef USE_ELF_CORE_DUMP
1711     bprm->core_dump = &elf_core_dump;
1712 #endif
1713 
1714     return 0;
1715 }
1716 
1717 #ifdef USE_ELF_CORE_DUMP
1718 /*
1719  * Definitions to generate Intel SVR4-like core files.
1720  * These mostly have the same names as the SVR4 types with "target_elf_"
1721  * tacked on the front to prevent clashes with linux definitions,
1722  * and the typedef forms have been avoided.  This is mostly like
1723  * the SVR4 structure, but more Linuxy, with things that Linux does
1724  * not support and which gdb doesn't really use excluded.
1725  *
1726  * Fields we don't dump (their contents are zero) in linux-user qemu
1727  * are marked with XXX.
1728  *
1729  * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1730  *
1731  * Porting ELF coredump to a target is a (quite) simple process.  First you
1732  * define USE_ELF_CORE_DUMP in the target ELF code (where init_thread() for
1733  * the target resides):
1734  *
1735  * #define USE_ELF_CORE_DUMP
1736  *
1737  * Next you define the type of the register set used for dumping.  The ELF
1738  * specification says it must be an array of elf_greg_t of size ELF_NREG.
1739  *
1740  * typedef <target_regtype> target_elf_greg_t;
1741  * #define ELF_NREG <number of registers>
1742  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1743  *
1744  * The last step is to implement a target-specific function that copies the
1745  * registers from the given CPU into that register set.  The prototype is:
1746  *
1747  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1748  *                                const CPUState *env);
1749  *
1750  * Parameters:
1751  *     regs - copy register values into here (allocated and zeroed by caller)
1752  *     env - copy registers from here
1753  *
1754  * An example for the ARM target is provided in this file.
1755  */
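     /*
      * Purely illustrative sketch of those three steps (hypothetical 32-bit
      * target, not code from this file; "env->regs[]" and ELF_NREG == 16 are
      * assumed names/values):
      *
      *   typedef target_ulong target_elf_greg_t;
      *   #define ELF_NREG 16
      *   typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
      *
      *   static void elf_core_copy_regs(target_elf_gregset_t *regs,
      *                                  const CPUState *env)
      *   {
      *       int i;
      *
      *       for (i = 0; i < ELF_NREG; i++) {
      *           (*regs)[i] = tswapl(env->regs[i]);
      *       }
      *   }
      */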
1756 
1757 /* An ELF note in memory */
1758 struct memelfnote {
1759     const char *name;
1760     size_t     namesz;
1761     size_t     namesz_rounded;
1762     int        type;
1763     size_t     datasz;
1764     size_t     datasz_rounded;
1765     void       *data;
1766     size_t     notesz;
1767 };
1768 
1769 struct target_elf_siginfo {
1770     target_int  si_signo; /* signal number */
1771     target_int  si_code;  /* extra code */
1772     target_int  si_errno; /* errno */
1773 };
1774 
1775 struct target_elf_prstatus {
1776     struct target_elf_siginfo pr_info;      /* Info associated with signal */
1777     target_short       pr_cursig;    /* Current signal */
1778     target_ulong       pr_sigpend;   /* XXX */
1779     target_ulong       pr_sighold;   /* XXX */
1780     target_pid_t       pr_pid;
1781     target_pid_t       pr_ppid;
1782     target_pid_t       pr_pgrp;
1783     target_pid_t       pr_sid;
1784     struct target_timeval pr_utime;  /* XXX User time */
1785     struct target_timeval pr_stime;  /* XXX System time */
1786     struct target_timeval pr_cutime; /* XXX Cumulative user time */
1787     struct target_timeval pr_cstime; /* XXX Cumulative system time */
1788     target_elf_gregset_t      pr_reg;       /* GP registers */
1789     target_int         pr_fpvalid;   /* XXX */
1790 };
1791 
1792 #define ELF_PRARGSZ     (80) /* Number of chars for args */
1793 
1794 struct target_elf_prpsinfo {
1795     char         pr_state;       /* numeric process state */
1796     char         pr_sname;       /* char for pr_state */
1797     char         pr_zomb;        /* zombie */
1798     char         pr_nice;        /* nice val */
1799     target_ulong pr_flag;        /* flags */
1800     target_uid_t pr_uid;
1801     target_gid_t pr_gid;
1802     target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
1803     /* Lots missing */
1804     char    pr_fname[16];           /* filename of executable */
1805     char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
1806 };
1807 
1808 /* Here is the structure in which status of each thread is captured. */
1809 struct elf_thread_status {
1810     QTAILQ_ENTRY(elf_thread_status)  ets_link;
1811     struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
1812 #if 0
1813     elf_fpregset_t fpu;             /* NT_PRFPREG */
1814     struct task_struct *thread;
1815     elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
1816 #endif
1817     struct memelfnote notes[1];
1818     int num_notes;
1819 };
1820 
1821 struct elf_note_info {
1822     struct memelfnote   *notes;
1823     struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
1824     struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
1825 
1826     QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
1827 #if 0
1828     /*
1829      * Current version of ELF coredump doesn't support
1830      * dumping fp regs etc.
1831      */
1832     elf_fpregset_t *fpu;
1833     elf_fpxregset_t *xfpu;
1834     int thread_status_size;
1835 #endif
1836     int notes_size;
1837     int numnote;
1838 };
1839 
1840 struct vm_area_struct {
1841     abi_ulong   vma_start;  /* start vaddr of memory region */
1842     abi_ulong   vma_end;    /* end vaddr of memory region */
1843     abi_ulong   vma_flags;  /* protection etc. flags for the region */
1844     QTAILQ_ENTRY(vm_area_struct) vma_link;
1845 };
1846 
1847 struct mm_struct {
1848     QTAILQ_HEAD(, vm_area_struct) mm_mmap;
1849     int mm_count;           /* number of mappings */
1850 };
1851 
1852 static struct mm_struct *vma_init(void);
1853 static void vma_delete(struct mm_struct *);
1854 static int vma_add_mapping(struct mm_struct *, abi_ulong,
1855                            abi_ulong, abi_ulong);
1856 static int vma_get_mapping_count(const struct mm_struct *);
1857 static struct vm_area_struct *vma_first(const struct mm_struct *);
1858 static struct vm_area_struct *vma_next(struct vm_area_struct *);
1859 static abi_ulong vma_dump_size(const struct vm_area_struct *);
1860 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
1861                       unsigned long flags);
1862 
1863 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
1864 static void fill_note(struct memelfnote *, const char *, int,
1865                       unsigned int, void *);
1866 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
1867 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
1868 static void fill_auxv_note(struct memelfnote *, const TaskState *);
1869 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
1870 static size_t note_size(const struct memelfnote *);
1871 static void free_note_info(struct elf_note_info *);
1872 static int fill_note_info(struct elf_note_info *, long, const CPUState *);
1873 static void fill_thread_info(struct elf_note_info *, const CPUState *);
1874 static int core_dump_filename(const TaskState *, char *, size_t);
1875 
1876 static int dump_write(int, const void *, size_t);
1877 static int write_note(struct memelfnote *, int);
1878 static int write_note_info(struct elf_note_info *, int);
1879 
1880 #ifdef BSWAP_NEEDED
1881 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
1882 {
1883     prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
1884     prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
1885     prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
1886     prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
1887     prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
1888     prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
1889     prstatus->pr_pid = tswap32(prstatus->pr_pid);
1890     prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
1891     prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
1892     prstatus->pr_sid = tswap32(prstatus->pr_sid);
1893     /* cpu times are not filled, so we skip them */
1894     /* regs should be in correct format already */
1895     prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
1896 }
1897 
1898 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
1899 {
1900     psinfo->pr_flag = tswapl(psinfo->pr_flag);
1901     psinfo->pr_uid = tswap16(psinfo->pr_uid);
1902     psinfo->pr_gid = tswap16(psinfo->pr_gid);
1903     psinfo->pr_pid = tswap32(psinfo->pr_pid);
1904     psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
1905     psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
1906     psinfo->pr_sid = tswap32(psinfo->pr_sid);
1907 }
1908 
1909 static void bswap_note(struct elf_note *en)
1910 {
1911     bswap32s(&en->n_namesz);
1912     bswap32s(&en->n_descsz);
1913     bswap32s(&en->n_type);
1914 }
1915 #else
1916 static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
1917 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
1918 static inline void bswap_note(struct elf_note *en) { }
1919 #endif /* BSWAP_NEEDED */
1920 
1921 /*
1922  * Minimal support for linux memory regions.  These are needed
1923  * when we are finding out what memory exactly belongs to the
1924  * emulated process.  No locks are needed here, as long as the
1925  * thread that received the signal is stopped.
1926  */
1927 
1928 static struct mm_struct *vma_init(void)
1929 {
1930     struct mm_struct *mm;
1931 
1932     if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
1933         return (NULL);
1934 
1935     mm->mm_count = 0;
1936     QTAILQ_INIT(&mm->mm_mmap);
1937 
1938     return (mm);
1939 }
1940 
1941 static void vma_delete(struct mm_struct *mm)
1942 {
1943     struct vm_area_struct *vma;
1944 
1945     while ((vma = vma_first(mm)) != NULL) {
1946         QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
1947         qemu_free(vma);
1948     }
1949     qemu_free(mm);
1950 }
1951 
1952 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
1953                            abi_ulong end, abi_ulong flags)
1954 {
1955     struct vm_area_struct *vma;
1956 
1957     if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
1958         return (-1);
1959 
1960     vma->vma_start = start;
1961     vma->vma_end = end;
1962     vma->vma_flags = flags;
1963 
1964     QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
1965     mm->mm_count++;
1966 
1967     return (0);
1968 }
1969 
1970 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
1971 {
1972     return (QTAILQ_FIRST(&mm->mm_mmap));
1973 }
1974 
1975 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
1976 {
1977     return (QTAILQ_NEXT(vma, vma_link));
1978 }
1979 
1980 static int vma_get_mapping_count(const struct mm_struct *mm)
1981 {
1982     return (mm->mm_count);
1983 }
1984 
1985 /*
1986  * Calculate file (dump) size of given memory region.
1987  */
1988 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
1989 {
1990     /* if we cannot even read the first page, skip it */
1991     if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
1992         return (0);
1993 
1994     /*
1995      * Usually we don't dump executable pages as they contain
1996      * non-writable code that the debugger can read directly from
1997      * the target library etc.  However, thread stacks are also
1998      * marked executable, so we read in the first page of the given
1999      * region and check whether it contains an ELF header.  If there
2000      * is no ELF header, we dump it.
2001      */
2002     if (vma->vma_flags & PROT_EXEC) {
2003         char page[TARGET_PAGE_SIZE];
2004 
2005         copy_from_user(page, vma->vma_start, sizeof (page));
2006         if ((page[EI_MAG0] == ELFMAG0) &&
2007             (page[EI_MAG1] == ELFMAG1) &&
2008             (page[EI_MAG2] == ELFMAG2) &&
2009             (page[EI_MAG3] == ELFMAG3)) {
2010             /*
2011              * Mappings are possibly from ELF binary.  Don't dump
2012              * them.
2013              */
2014             return (0);
2015         }
2016     }
2017 
2018     return (vma->vma_end - vma->vma_start);
2019 }
2020 
2021 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2022                       unsigned long flags)
2023 {
2024     struct mm_struct *mm = (struct mm_struct *)priv;
2025 
2026     vma_add_mapping(mm, start, end, flags);
2027     return (0);
2028 }
2029 
2030 static void fill_note(struct memelfnote *note, const char *name, int type,
2031                       unsigned int sz, void *data)
2032 {
2033     unsigned int namesz;
2034 
2035     namesz = strlen(name) + 1;
2036     note->name = name;
2037     note->namesz = namesz;
2038     note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2039     note->type = type;
2040     note->datasz = sz;
2041     note->datasz_rounded = roundup(sz, sizeof (int32_t));
2042 
2043     note->data = data;
2044 
2045     /*
2046      * We calculate the rounded-up note size here, as specified by
2047      * the ELF document.
2048      */
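         /* For example, the name "CORE" gives namesz = 5, which rounds up
            to 8, while a 12-byte payload needs no padding (stays 12).  */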
2049     note->notesz = sizeof (struct elf_note) +
2050         note->namesz_rounded + note->datasz_rounded;
2051 }
2052 
2053 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2054                             uint32_t flags)
2055 {
2056     (void) memset(elf, 0, sizeof(*elf));
2057 
2058     (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2059     elf->e_ident[EI_CLASS] = ELF_CLASS;
2060     elf->e_ident[EI_DATA] = ELF_DATA;
2061     elf->e_ident[EI_VERSION] = EV_CURRENT;
2062     elf->e_ident[EI_OSABI] = ELF_OSABI;
2063 
2064     elf->e_type = ET_CORE;
2065     elf->e_machine = machine;
2066     elf->e_version = EV_CURRENT;
2067     elf->e_phoff = sizeof(struct elfhdr);
2068     elf->e_flags = flags;
2069     elf->e_ehsize = sizeof(struct elfhdr);
2070     elf->e_phentsize = sizeof(struct elf_phdr);
2071     elf->e_phnum = segs;
2072 
2073     bswap_ehdr(elf);
2074 }
2075 
2076 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2077 {
2078     phdr->p_type = PT_NOTE;
2079     phdr->p_offset = offset;
2080     phdr->p_vaddr = 0;
2081     phdr->p_paddr = 0;
2082     phdr->p_filesz = sz;
2083     phdr->p_memsz = 0;
2084     phdr->p_flags = 0;
2085     phdr->p_align = 0;
2086 
2087     bswap_phdr(phdr, 1);
2088 }
2089 
2090 static size_t note_size(const struct memelfnote *note)
2091 {
2092     return (note->notesz);
2093 }
2094 
2095 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2096                           const TaskState *ts, int signr)
2097 {
2098     (void) memset(prstatus, 0, sizeof (*prstatus));
2099     prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2100     prstatus->pr_pid = ts->ts_tid;
2101     prstatus->pr_ppid = getppid();
2102     prstatus->pr_pgrp = getpgrp();
2103     prstatus->pr_sid = getsid(0);
2104 
2105     bswap_prstatus(prstatus);
2106 }
2107 
2108 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2109 {
2110     char *filename, *base_filename;
2111     unsigned int i, len;
2112 
2113     (void) memset(psinfo, 0, sizeof (*psinfo));
2114 
2115     len = ts->info->arg_end - ts->info->arg_start;
2116     if (len >= ELF_PRARGSZ)
2117         len = ELF_PRARGSZ - 1;
2118     if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2119         return -EFAULT;
2120     for (i = 0; i < len; i++)
2121         if (psinfo->pr_psargs[i] == 0)
2122             psinfo->pr_psargs[i] = ' ';
2123     psinfo->pr_psargs[len] = 0;
2124 
2125     psinfo->pr_pid = getpid();
2126     psinfo->pr_ppid = getppid();
2127     psinfo->pr_pgrp = getpgrp();
2128     psinfo->pr_sid = getsid(0);
2129     psinfo->pr_uid = getuid();
2130     psinfo->pr_gid = getgid();
2131 
2132     filename = strdup(ts->bprm->filename);
2133     base_filename = strdup(basename(filename));
2134     (void) strncpy(psinfo->pr_fname, base_filename,
2135                    sizeof(psinfo->pr_fname));
2136     free(base_filename);
2137     free(filename);
2138 
2139     bswap_psinfo(psinfo);
2140     return (0);
2141 }
2142 
2143 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2144 {
2145     elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2146     elf_addr_t orig_auxv = auxv;
2147     abi_ulong val;
2148     void *ptr;
2149     int i, len;
2150 
2151     /*
2152      * The auxiliary vector is stored on the target process stack.  It
2153      * contains {type, value} pairs that we need to dump into the note.
2154      * This is not strictly necessary but we do it for completeness.
2155      */
2156 
2157     /* find out the length of the vector; AT_NULL is the terminator */
2158     i = len = 0;
2159     do {
2160         get_user_ual(val, auxv);
2161         i += 2;
2162         auxv += 2 * sizeof (elf_addr_t);
2163     } while (val != AT_NULL);
2164     len = i * sizeof (elf_addr_t);
2165 
2166     /* read in whole auxv vector and copy it to memelfnote */
2167     ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2168     if (ptr != NULL) {
2169         fill_note(note, "CORE", NT_AUXV, len, ptr);
2170         unlock_user(ptr, auxv, len);
2171     }
2172 }
2173 
2174 /*
2175  * Constructs the name of the coredump file.  We use the following
2176  * convention for the name:
2177  *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2178  *
2179  * Returns 0 in case of success, -1 otherwise (errno is set).
2180  */
2181 static int core_dump_filename(const TaskState *ts, char *buf,
2182                               size_t bufsize)
2183 {
2184     char timestamp[64];
2185     char *filename = NULL;
2186     char *base_filename = NULL;
2187     struct timeval tv;
2188     struct tm tm;
2189 
2190     assert(bufsize >= PATH_MAX);
2191 
2192     if (gettimeofday(&tv, NULL) < 0) {
2193         (void) fprintf(stderr, "unable to get current timestamp: %s",
2194                        strerror(errno));
2195         return (-1);
2196     }
2197 
2198     filename = strdup(ts->bprm->filename);
2199     base_filename = strdup(basename(filename));
2200     (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2201                     localtime_r(&tv.tv_sec, &tm));
2202     (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2203                     base_filename, timestamp, (int)getpid());
2204     free(base_filename);
2205     free(filename);
2206 
2207     return (0);
2208 }
2209 
2210 static int dump_write(int fd, const void *ptr, size_t size)
2211 {
2212     const char *bufp = (const char *)ptr;
2213     ssize_t bytes_written, bytes_left;
2214     struct rlimit dumpsize;
2215     off_t pos;
2216 
2217     bytes_written = 0;
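         /*
          * Respect RLIMIT_CORE: for a seekable output, never write past the
          * current core file size limit.  Non-seekable outputs (e.g. pipes)
          * and an unlimited limit are not clamped.
          */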
2218     getrlimit(RLIMIT_CORE, &dumpsize);
2219     if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2220         if (errno == ESPIPE) { /* not a seekable stream */
2221             bytes_left = size;
2222         } else {
2223             return pos;
2224         }
2225     } else {
2226         if (dumpsize.rlim_cur <= pos) {
2227             return -1;
2228         } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2229             bytes_left = size;
2230         } else {
2231             size_t limit_left = dumpsize.rlim_cur - pos;
2232             bytes_left = limit_left >= size ? size : limit_left;
2233         }
2234     }
2235 
2236     /*
2237      * Under normal conditions a single write(2) should do, but
2238      * in the case of a socket etc. this mechanism is more portable.
2239      */
2240     do {
2241         bytes_written = write(fd, bufp, bytes_left);
2242         if (bytes_written < 0) {
2243             if (errno == EINTR)
2244                 continue;
2245             return (-1);
2246         } else if (bytes_written == 0) { /* eof */
2247             return (-1);
2248         }
2249         bufp += bytes_written;
2250         bytes_left -= bytes_written;
2251     } while (bytes_left > 0);
2252 
2253     return (0);
2254 }
2255 
2256 static int write_note(struct memelfnote *men, int fd)
2257 {
2258     struct elf_note en;
2259 
2260     en.n_namesz = men->namesz;
2261     en.n_type = men->type;
2262     en.n_descsz = men->datasz;
2263 
2264     bswap_note(&en);
2265 
2266     if (dump_write(fd, &en, sizeof(en)) != 0)
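         /*
          * A note is the fixed header followed by the name and the data,
          * each padded to a 4-byte boundary (the *_rounded sizes computed
          * by fill_note()).
          */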
2267         return (-1);
2268     if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2269         return (-1);
2270     if (dump_write(fd, men->data, men->datasz_rounded) != 0)
2271         return (-1);
2272 
2273     return (0);
2274 }
2275 
2276 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2277 {
2278     TaskState *ts = (TaskState *)env->opaque;
2279     struct elf_thread_status *ets;
2280 
2281     ets = qemu_mallocz(sizeof (*ets));
2282     ets->num_notes = 1; /* only prstatus is dumped */
2283     fill_prstatus(&ets->prstatus, ts, 0);
2284     elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2285     fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2286               &ets->prstatus);
2287 
2288     QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2289 
2290     info->notes_size += note_size(&ets->notes[0]);
2291 }
2292 
2293 static int fill_note_info(struct elf_note_info *info,
2294                           long signr, const CPUState *env)
2295 {
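     /* We emit three notes for the dumping thread: its NT_PRSTATUS plus the
        process-wide NT_PRPSINFO and NT_AUXV.  */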
2296 #define NUMNOTES 3
2297     CPUState *cpu = NULL;
2298     TaskState *ts = (TaskState *)env->opaque;
2299     int i;
2300 
2301     (void) memset(info, 0, sizeof (*info));
2302 
2303     QTAILQ_INIT(&info->thread_list);
2304 
2305     info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2306     if (info->notes == NULL)
2307         return (-ENOMEM);
2308     info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2309     if (info->prstatus == NULL)
2310         return (-ENOMEM);
2311     info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2312     if (info->psinfo == NULL)
2313         return (-ENOMEM);
2314 
2315     /*
2316      * First fill in status (and registers) of current thread
2317      * including process info & aux vector.
2318      */
2319     fill_prstatus(info->prstatus, ts, signr);
2320     elf_core_copy_regs(&info->prstatus->pr_reg, env);
2321     fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2322               sizeof (*info->prstatus), info->prstatus);
2323     fill_psinfo(info->psinfo, ts);
2324     fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2325               sizeof (*info->psinfo), info->psinfo);
2326     fill_auxv_note(&info->notes[2], ts);
2327     info->numnote = NUMNOTES;
2328 
2329     info->notes_size = 0;
2330     for (i = 0; i < info->numnote; i++)
2331         info->notes_size += note_size(&info->notes[i]);
2332 
2333     /* read and fill status of all threads */
2334     cpu_list_lock();
2335     for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2336         if (cpu == thread_env)
2337             continue;
2338         fill_thread_info(info, cpu);
2339     }
2340     cpu_list_unlock();
2341 
2342     return (0);
2343 }
2344 
2345 static void free_note_info(struct elf_note_info *info)
2346 {
2347     struct elf_thread_status *ets;
2348 
2349     while (!QTAILQ_EMPTY(&info->thread_list)) {
2350         ets = QTAILQ_FIRST(&info->thread_list);
2351         QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2352         qemu_free(ets);
2353     }
2354 
2355     qemu_free(info->prstatus);
2356     qemu_free(info->psinfo);
2357     qemu_free(info->notes);
2358 }
2359 
2360 static int write_note_info(struct elf_note_info *info, int fd)
2361 {
2362     struct elf_thread_status *ets;
2363     int i, error = 0;
2364 
2365     /* write prstatus, psinfo and auxv for current thread */
2366     for (i = 0; i < info->numnote; i++)
2367         if ((error = write_note(&info->notes[i], fd)) != 0)
2368             return (error);
2369 
2370     /* write prstatus for each thread */
2371     QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
2373         if ((error = write_note(&ets->notes[0], fd)) != 0)
2374             return (error);
2375     }
2376 
2377     return (0);
2378 }
2379 
2380 /*
2381  * Write out ELF coredump.
2382  *
2383  * See documentation of ELF object file format in:
2384  * http://www.caldera.com/developers/devspecs/gabi41.pdf
2385  *
2386  * The coredump format in Linux is as follows:
2387  *
2388  * 0   +----------------------+         \
2389  *     | ELF header           | ET_CORE  |
2390  *     +----------------------+          |
2391  *     | ELF program headers  |          |--- headers
2392  *     | - NOTE section       |          |
2393  *     | - PT_LOAD sections   |          |
2394  *     +----------------------+         /
2395  *     | NOTEs:               |
2396  *     | - NT_PRSTATUS        |
2397  *     | - NT_PRPSINFO        |
2398  *     | - NT_AUXV            |
2399  *     +----------------------+ <-- aligned to target page
2400  *     | Process memory dump  |
2401  *     :                      :
2402  *     .                      .
2403  *     :                      :
2404  *     |                      |
2405  *     +----------------------+
2406  *
2407  * NT_PRSTATUS -> struct elf_prstatus (per thread)
2408  * NT_PRPSINFO -> struct elf_prpsinfo
2409  * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
2410  *
2411  * The format follows the System V format as closely as possible.  Current
2412  * version limitations are as follows:
2413  *     - no floating point registers are dumped
2414  *
2415  * The function returns 0 on success, a negative errno otherwise.
2416  *
2417  * TODO: make this work also during runtime: it should be
2418  * possible to force a coredump from a running process and then
2419  * continue processing.  For example qemu could set up a SIGUSR2
2420  * handler (provided that the target process hasn't registered a
2421  * handler for it) that does the dump when the signal is received.
2422  */
2423 static int elf_core_dump(int signr, const CPUState *env)
2424 {
2425     const TaskState *ts = (const TaskState *)env->opaque;
2426     struct vm_area_struct *vma = NULL;
2427     char corefile[PATH_MAX];
2428     struct elf_note_info info;
2429     struct elfhdr elf;
2430     struct elf_phdr phdr;
2431     struct rlimit dumpsize;
2432     struct mm_struct *mm = NULL;
2433     off_t offset = 0, data_offset = 0;
2434     int segs = 0;
2435     int fd = -1;
2436 
2437     errno = 0;
2438     getrlimit(RLIMIT_CORE, &dumpsize);
2439     if (dumpsize.rlim_cur == 0)
2440         return 0;
2441 
2442     if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2443         return (-errno);
2444 
2445     if ((fd = open(corefile, O_WRONLY | O_CREAT,
2446                    S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2447         return (-errno);
2448 
2449     /*
2450      * Walk through the target process memory mappings and
2451      * set up a structure containing this information.  After
2452      * this point the vma_xxx functions can be used.
2453      */
2454     if ((mm = vma_init()) == NULL)
2455         goto out;
2456 
2457     walk_memory_regions(mm, vma_walker);
2458     segs = vma_get_mapping_count(mm);
2459 
2460     /*
2461      * Construct a valid coredump ELF header.  We also
2462      * add one more segment for the notes.
2463      */
2464     fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2465     if (dump_write(fd, &elf, sizeof (elf)) != 0)
2466         goto out;
2467 
2468     /* fill in in-memory version of notes */
2469     if (fill_note_info(&info, signr, env) < 0)
2470         goto out;
2471 
2472     offset += sizeof (elf);                             /* elf header */
2473     offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
2474 
2475     /* write out notes program header */
2476     fill_elf_note_phdr(&phdr, info.notes_size, offset);
2477 
2478     offset += info.notes_size;
2479     if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2480         goto out;
2481 
2482     /*
2483      * The ELF specification wants data to start at a page boundary,
2484      * so we align it here.
2485      */
2486     data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2487 
2488     /*
2489      * Write program headers for memory regions mapped in
2490      * the target process.
2491      */
2492     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2493         (void) memset(&phdr, 0, sizeof (phdr));
2494 
2495         phdr.p_type = PT_LOAD;
2496         phdr.p_offset = offset;
2497         phdr.p_vaddr = vma->vma_start;
2498         phdr.p_paddr = 0;
2499         phdr.p_filesz = vma_dump_size(vma);
2500         offset += phdr.p_filesz;
2501         phdr.p_memsz = vma->vma_end - vma->vma_start;
2502         phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2503         if (vma->vma_flags & PROT_WRITE)
2504             phdr.p_flags |= PF_W;
2505         if (vma->vma_flags & PROT_EXEC)
2506             phdr.p_flags |= PF_X;
2507         phdr.p_align = ELF_EXEC_PAGESIZE;
2508 
2509         bswap_phdr(&phdr, 1);
2510         dump_write(fd, &phdr, sizeof (phdr));
2511     }
2512 
2513     /*
2514      * Next we write the notes just after the program headers.  No
2515      * alignment is needed here.
2516      */
2517     if (write_note_info(&info, fd) < 0)
2518         goto out;
2519 
2520     /* align data to page boundary */
2521     if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2522         goto out;
2523 
2524     /*
2525      * Finally we can dump the process memory into the corefile as well.
2526      */
2527     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2528         abi_ulong addr;
2529         abi_ulong end;
2530 
2531         end = vma->vma_start + vma_dump_size(vma);
2532 
2533         for (addr = vma->vma_start; addr < end;
2534              addr += TARGET_PAGE_SIZE) {
2535             char page[TARGET_PAGE_SIZE];
2536             int error;
2537 
2538             /*
2539              *  Read in page from target process memory and
2540              *  write it to coredump file.
2541              */
2542             error = copy_from_user(page, addr, sizeof (page));
2543             if (error != 0) {
2544                 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2545                                addr);
2546                 errno = -error;
2547                 goto out;
2548             }
2549             if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2550                 goto out;
2551         }
2552     }
2553 
2554  out:
2555     free_note_info(&info);
2556     if (mm != NULL)
2557         vma_delete(mm);
2558     (void) close(fd);
2559 
2560     if (errno != 0)
2561         return (-errno);
2562     return (0);
2563 }
2564 #endif /* USE_ELF_CORE_DUMP */
2565 
2566 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2567 {
2568     init_thread(regs, infop);
2569 }
2570