/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

#ifndef user_long_t
#define user_long_t long
#endif
#ifndef user_siginfo_t
#define user_siginfo_t siginfo_t
#endif

/* That's for binfmt_elf_fdpic to deal with */
#ifndef elf_check_fdpic
#define elf_check_fdpic(ex) false
#endif

static int load_elf_binary(struct linux_binprm *bprm);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
				int, int, unsigned long);

#ifdef CONFIG_USELIB
static int load_elf_library(struct file *);
#else
#define load_elf_library NULL
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump	NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

static int set_brk(unsigned long start, unsigned long end, int prot)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		/*
		 * Map the last of the bss segment.
		 * If the header is requesting these pages to be
		 * executable, honour that (ppc32 needs this).
		 */
		int error = vm_brk_flags(start, end - start,
				prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			return error;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
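/*
 * Illustrative example (assuming ELF_MIN_ALIGN == 4096): for an elf_bss
 * of 0x0804a123, ELF_PAGEOFFSET() yields 0x123, so padzero() below
 * clears the remaining 0xedd bytes up to the 0x0804b000 page boundary.
 */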
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif

#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif

static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
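	/*
	 * (These bytes back the AT_RANDOM auxv entry below; glibc, for
	 * example, uses them to seed its stack-protector canary and
	 * pointer-mangling guard rather than as a general PRNG.)
	 */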
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	current->mm->arg_end = p;
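	/*
	 * Illustrative final layout on a downward-growing stack, one
	 * elf_addr_t per slot (envp and the auxv pairs are written just
	 * below):
	 *
	 *   sp -> argc
	 *         argv[0] .. argv[argc-1], NULL
	 *         envp[0] .. envp[envc-1], NULL
	 *         auxv id/value pairs, terminated by AT_NULL
	 *         argv/envp strings, platform strings, AT_RANDOM bytes
	 */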
	/* Populate list of envp pointers back to envp strings. */
	current->mm->env_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}

#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image (since size < total_size).
	 * So we first map the 'big' image and then unmap the remainder
	 * at the end (the unmap is needed for ELF images with holes).
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	return map_addr;
}

#endif /* !elf_map */

static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}

/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns NULL upon failure.
 */
static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval, size, err = -1;
	loff_t pos = elf_ex->e_phoff;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers... */
	if (elf_ex->e_phnum < 1 ||
		elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* ...and their total size. */
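	/*
	 * (e_phnum is a 16-bit field, so at most 65535 headers are
	 * representable anyway; the byte-size bound below additionally
	 * keeps the kmalloc() to at most ELF_MIN_ALIGN bytes.)
	 */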
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = kernel_read(elf_file, elf_phdata, size, &pos);
	if (retval != size) {
		err = (retval < 0) ? retval : -EIO;
		goto out;
	}

	/* Success! */
	err = 0;
out:
	if (err) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}

#ifndef CONFIG_ARCH_BINFMT_ELF_STATE

/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

#define INIT_ARCH_ELF_STATE {}

/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false.
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
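/*
 * (MIPS, for example, selects CONFIG_ARCH_BINFMT_ELF_STATE and uses these
 * hooks to choose a floating-point mode from the PT_MIPS_ABIFLAGS headers
 * of the binary and its interpreter.)
 */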
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
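	/*
	 * Illustrative example (zero load address, 4 KiB pages): a segment
	 * with p_vaddr 0x10000, p_filesz 0x1234 and p_memsz 0x5000 gives
	 * elf_bss = 0x11234 and last_bss = 0x15000; padzero() cleared
	 * 0x11234..0x11fff above, and vm_brk_flags() below maps the
	 * anonymous pages 0x12000..0x14fff.
	 */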
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	unsigned long elf_bss, elf_brk;
	int bss_prot = 0;
	int retval, i;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct pt_regs *regs = current_pt_regs();
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
	} *loc;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	loff_t pos;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (elf_check_fdpic(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_ph;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_ph;

			pos = elf_ppnt->p_offset;
			retval = kernel_read(bprm->file, elf_interpreter,
					     elf_ppnt->p_filesz, &pos);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NUL-terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;
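			/*
			 * open_exec() takes a deny-write reference on the
			 * interpreter file; the matching
			 * allow_write_access()/fput() happen on both the
			 * success path and the error paths below.
			 */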
			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			would_dump(bprm, interpreter);

			/* Get the exec headers */
			pos = 0;
			retval = kernel_read(interpreter, &loc->interp_elf_ex,
					     sizeof(loc->interp_elf_ex), &pos);
			if (retval != sizeof(loc->interp_elf_ex)) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			break;
		}
		elf_ppnt++;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex) ||
		    elf_check_fdpic(&loc->interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(&loc->interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(&loc->elf_ex,
				!!interpreter, &loc->interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(loc->elf_ex, &arch_state);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);
	install_exec_creds(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
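	/*
	 * For each PT_LOAD header: clear any bss tail left by the previous
	 * segment, translate p_flags into PROT_* bits, choose between
	 * MAP_FIXED and a (possibly randomized) base, map the segment,
	 * and track the code/data/bss extents.
	 */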
	for(i = 0, elf_ppnt = elf_phdata;
	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * If we are loading ET_EXEC or we have already performed
		 * the ET_DYN load_addr calculations, proceed normally.
		 */
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers, and to calculate the entire
			 * size of the ELF mapping (total_size). (Note that
			 * load_addr_set is set to true later once the
			 * initial mapping is performed.)
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example, to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED).
			 */
			if (elf_interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				elf_flags |= MAP_FIXED;
			} else
				load_bias = 0;

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
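			/*
			 * (Illustrative: a PIE such as /bin/ls is mapped at
			 * ELF_ET_DYN_BASE plus the mmap randomization, while
			 * running "./ld.so /bin/ls" maps ld.so wherever the
			 * unhinted mmap search places it, since load_bias
			 * stays 0 and MAP_FIXED is not set.)
			 */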
			load_bias = ELF_PAGESTART(load_bias - vaddr);

			total_size = total_mapping_size(elf_phdata,
							loc->elf_ex.e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk) {
			bss_prot = elf_prot;
			elf_brk = k;
		}
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk, bss_prot);
	if (retval)
		goto out_free_dentry;
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		unsigned long interp_map_addr = 0;

		elf_entry = load_elf_interp(&loc->interp_elf_ex,
					    interpreter,
					    &interp_map_addr,
					    load_bias, interp_elf_phdata);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(interp_elf_phdata);
	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, &loc->elf_ex,
			  load_addr, interp_load_addr);
	if (retval < 0)
		goto out;
	/* N.B. passed_fileno might not be initialized? */
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_phdata);
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

#ifdef CONFIG_USELIB
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
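/*
 * (This backs the legacy uselib(2) system call, built only when
 * CONFIG_USELIB is set; modern userspace relies on the dynamic loader
 * and never calls it.)
 */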
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;
	loff_t pos = 0;

	error = -ENOEXEC;
	retval = kernel_read(file, &elf_ex, sizeof(elf_ex), &pos);
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;
	if (elf_check_fdpic(&elf_ex))
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	pos = elf_ex.e_phoff;
	retval = kernel_read(file, eppnt, j, &pos);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		error = vm_brk(len, bss - len);
		if (error)
			goto out_free_ph;
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include the vDSO, vsyscall, and other
 * architecture-specific mappings.
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
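	/* (e.g. "[vdso]" on x86, "[vectors]" on 32-bit ARM) */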
	if (arch_vma_name(vma))
		return true;

	return false;
}

/*
 * Decide what to dump of a segment: part, all, or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (vma->vm_flags & VM_HUGETLB) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header. If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		mm_segment_t fs = get_fs();
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		/*
		 * Switch to the user "segment" for get_user(),
		 * then put back what elf_core_dump() had in place.
		 */
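		/* If the first page is unreadable, treat the mapping as
		   non-ELF rather than failing the dump. */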
1384 */ 1385 set_fs(USER_DS); 1386 if (unlikely(get_user(word, header))) 1387 word = 0; 1388 set_fs(fs); 1389 if (word == magic.cmp) 1390 return PAGE_SIZE; 1391 } 1392 1393 #undef FILTER 1394 1395 return 0; 1396 1397 whole: 1398 return vma->vm_end - vma->vm_start; 1399 } 1400 1401 /* An ELF note in memory */ 1402 struct memelfnote 1403 { 1404 const char *name; 1405 int type; 1406 unsigned int datasz; 1407 void *data; 1408 }; 1409 1410 static int notesize(struct memelfnote *en) 1411 { 1412 int sz; 1413 1414 sz = sizeof(struct elf_note); 1415 sz += roundup(strlen(en->name) + 1, 4); 1416 sz += roundup(en->datasz, 4); 1417 1418 return sz; 1419 } 1420 1421 static int writenote(struct memelfnote *men, struct coredump_params *cprm) 1422 { 1423 struct elf_note en; 1424 en.n_namesz = strlen(men->name) + 1; 1425 en.n_descsz = men->datasz; 1426 en.n_type = men->type; 1427 1428 return dump_emit(cprm, &en, sizeof(en)) && 1429 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) && 1430 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4); 1431 } 1432 1433 static void fill_elf_header(struct elfhdr *elf, int segs, 1434 u16 machine, u32 flags) 1435 { 1436 memset(elf, 0, sizeof(*elf)); 1437 1438 memcpy(elf->e_ident, ELFMAG, SELFMAG); 1439 elf->e_ident[EI_CLASS] = ELF_CLASS; 1440 elf->e_ident[EI_DATA] = ELF_DATA; 1441 elf->e_ident[EI_VERSION] = EV_CURRENT; 1442 elf->e_ident[EI_OSABI] = ELF_OSABI; 1443 1444 elf->e_type = ET_CORE; 1445 elf->e_machine = machine; 1446 elf->e_version = EV_CURRENT; 1447 elf->e_phoff = sizeof(struct elfhdr); 1448 elf->e_flags = flags; 1449 elf->e_ehsize = sizeof(struct elfhdr); 1450 elf->e_phentsize = sizeof(struct elf_phdr); 1451 elf->e_phnum = segs; 1452 1453 return; 1454 } 1455 1456 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset) 1457 { 1458 phdr->p_type = PT_NOTE; 1459 phdr->p_offset = offset; 1460 phdr->p_vaddr = 0; 1461 phdr->p_paddr = 0; 1462 phdr->p_filesz = sz; 1463 phdr->p_memsz = 0; 1464 phdr->p_flags = 0; 1465 phdr->p_align = 0; 1466 return; 1467 } 1468 1469 static void fill_note(struct memelfnote *note, const char *name, int type, 1470 unsigned int sz, void *data) 1471 { 1472 note->name = name; 1473 note->type = type; 1474 note->datasz = sz; 1475 note->data = data; 1476 return; 1477 } 1478 1479 /* 1480 * fill up all the fields in prstatus from the given task struct, except 1481 * registers which need to be filled up separately. 1482 */ 1483 static void fill_prstatus(struct elf_prstatus *prstatus, 1484 struct task_struct *p, long signr) 1485 { 1486 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; 1487 prstatus->pr_sigpend = p->pending.signal.sig[0]; 1488 prstatus->pr_sighold = p->blocked.sig[0]; 1489 rcu_read_lock(); 1490 prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent)); 1491 rcu_read_unlock(); 1492 prstatus->pr_pid = task_pid_vnr(p); 1493 prstatus->pr_pgrp = task_pgrp_vnr(p); 1494 prstatus->pr_sid = task_session_vnr(p); 1495 if (thread_group_leader(p)) { 1496 struct task_cputime cputime; 1497 1498 /* 1499 * This is the record for the group leader. It shows the 1500 * group-wide total, not its individual thread total. 
1501 */ 1502 thread_group_cputime(p, &cputime); 1503 prstatus->pr_utime = ns_to_timeval(cputime.utime); 1504 prstatus->pr_stime = ns_to_timeval(cputime.stime); 1505 } else { 1506 u64 utime, stime; 1507 1508 task_cputime(p, &utime, &stime); 1509 prstatus->pr_utime = ns_to_timeval(utime); 1510 prstatus->pr_stime = ns_to_timeval(stime); 1511 } 1512 1513 prstatus->pr_cutime = ns_to_timeval(p->signal->cutime); 1514 prstatus->pr_cstime = ns_to_timeval(p->signal->cstime); 1515 } 1516 1517 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, 1518 struct mm_struct *mm) 1519 { 1520 const struct cred *cred; 1521 unsigned int i, len; 1522 1523 /* first copy the parameters from user space */ 1524 memset(psinfo, 0, sizeof(struct elf_prpsinfo)); 1525 1526 len = mm->arg_end - mm->arg_start; 1527 if (len >= ELF_PRARGSZ) 1528 len = ELF_PRARGSZ-1; 1529 if (copy_from_user(&psinfo->pr_psargs, 1530 (const char __user *)mm->arg_start, len)) 1531 return -EFAULT; 1532 for(i = 0; i < len; i++) 1533 if (psinfo->pr_psargs[i] == 0) 1534 psinfo->pr_psargs[i] = ' '; 1535 psinfo->pr_psargs[len] = 0; 1536 1537 rcu_read_lock(); 1538 psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent)); 1539 rcu_read_unlock(); 1540 psinfo->pr_pid = task_pid_vnr(p); 1541 psinfo->pr_pgrp = task_pgrp_vnr(p); 1542 psinfo->pr_sid = task_session_vnr(p); 1543 1544 i = p->state ? ffz(~p->state) + 1 : 0; 1545 psinfo->pr_state = i; 1546 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i]; 1547 psinfo->pr_zomb = psinfo->pr_sname == 'Z'; 1548 psinfo->pr_nice = task_nice(p); 1549 psinfo->pr_flag = p->flags; 1550 rcu_read_lock(); 1551 cred = __task_cred(p); 1552 SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid)); 1553 SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid)); 1554 rcu_read_unlock(); 1555 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname)); 1556 1557 return 0; 1558 } 1559 1560 static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm) 1561 { 1562 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv; 1563 int i = 0; 1564 do 1565 i += 2; 1566 while (auxv[i - 2] != AT_NULL); 1567 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv); 1568 } 1569 1570 static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata, 1571 const siginfo_t *siginfo) 1572 { 1573 mm_segment_t old_fs = get_fs(); 1574 set_fs(KERNEL_DS); 1575 copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo); 1576 set_fs(old_fs); 1577 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata); 1578 } 1579 1580 #define MAX_FILE_NOTE_SIZE (4*1024*1024) 1581 /* 1582 * Format of NT_FILE note: 1583 * 1584 * long count -- how many files are mapped 1585 * long page_size -- units for file_ofs 1586 * array of [COUNT] elements of 1587 * long start 1588 * long end 1589 * long file_ofs 1590 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL... 
1591 */ 1592 static int fill_files_note(struct memelfnote *note) 1593 { 1594 struct vm_area_struct *vma; 1595 unsigned count, size, names_ofs, remaining, n; 1596 user_long_t *data; 1597 user_long_t *start_end_ofs; 1598 char *name_base, *name_curpos; 1599 1600 /* *Estimated* file count and total data size needed */ 1601 count = current->mm->map_count; 1602 if (count > UINT_MAX / 64) 1603 return -EINVAL; 1604 size = count * 64; 1605 1606 names_ofs = (2 + 3 * count) * sizeof(data[0]); 1607 alloc: 1608 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */ 1609 return -EINVAL; 1610 size = round_up(size, PAGE_SIZE); 1611 data = vmalloc(size); 1612 if (!data) 1613 return -ENOMEM; 1614 1615 start_end_ofs = data + 2; 1616 name_base = name_curpos = ((char *)data) + names_ofs; 1617 remaining = size - names_ofs; 1618 count = 0; 1619 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { 1620 struct file *file; 1621 const char *filename; 1622 1623 file = vma->vm_file; 1624 if (!file) 1625 continue; 1626 filename = file_path(file, name_curpos, remaining); 1627 if (IS_ERR(filename)) { 1628 if (PTR_ERR(filename) == -ENAMETOOLONG) { 1629 vfree(data); 1630 size = size * 5 / 4; 1631 goto alloc; 1632 } 1633 continue; 1634 } 1635 1636 /* file_path() fills at the end, move name down */ 1637 /* n = strlen(filename) + 1: */ 1638 n = (name_curpos + remaining) - filename; 1639 remaining = filename - name_curpos; 1640 memmove(name_curpos, filename, n); 1641 name_curpos += n; 1642 1643 *start_end_ofs++ = vma->vm_start; 1644 *start_end_ofs++ = vma->vm_end; 1645 *start_end_ofs++ = vma->vm_pgoff; 1646 count++; 1647 } 1648 1649 /* Now we know exact count of files, can store it */ 1650 data[0] = count; 1651 data[1] = PAGE_SIZE; 1652 /* 1653 * Count usually is less than current->mm->map_count, 1654 * we need to move filenames down. 1655 */ 1656 n = current->mm->map_count - count; 1657 if (n != 0) { 1658 unsigned shift_bytes = n * 3 * sizeof(data[0]); 1659 memmove(name_base - shift_bytes, name_base, 1660 name_curpos - name_base); 1661 name_curpos -= shift_bytes; 1662 } 1663 1664 size = name_curpos - (char *)data; 1665 fill_note(note, "CORE", NT_FILE, size, data); 1666 return 0; 1667 } 1668 1669 #ifdef CORE_DUMP_USE_REGSET 1670 #include <linux/regset.h> 1671 1672 struct elf_thread_core_info { 1673 struct elf_thread_core_info *next; 1674 struct task_struct *task; 1675 struct elf_prstatus prstatus; 1676 struct memelfnote notes[0]; 1677 }; 1678 1679 struct elf_note_info { 1680 struct elf_thread_core_info *thread; 1681 struct memelfnote psinfo; 1682 struct memelfnote signote; 1683 struct memelfnote auxv; 1684 struct memelfnote files; 1685 user_siginfo_t csigdata; 1686 size_t size; 1687 int thread_notes; 1688 }; 1689 1690 /* 1691 * When a regset has a writeback hook, we call it on each thread before 1692 * dumping user memory. On register window machines, this makes sure the 1693 * user memory backing the register data is up to date before we read it. 
1694 */ 1695 static void do_thread_regset_writeback(struct task_struct *task, 1696 const struct user_regset *regset) 1697 { 1698 if (regset->writeback) 1699 regset->writeback(task, regset, 1); 1700 } 1701 1702 #ifndef PRSTATUS_SIZE 1703 #define PRSTATUS_SIZE(S, R) sizeof(S) 1704 #endif 1705 1706 #ifndef SET_PR_FPVALID 1707 #define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V)) 1708 #endif 1709 1710 static int fill_thread_core_info(struct elf_thread_core_info *t, 1711 const struct user_regset_view *view, 1712 long signr, size_t *total) 1713 { 1714 unsigned int i; 1715 unsigned int regset0_size = regset_size(t->task, &view->regsets[0]); 1716 1717 /* 1718 * NT_PRSTATUS is the one special case, because the regset data 1719 * goes into the pr_reg field inside the note contents, rather 1720 * than being the whole note contents. We fill the reset in here. 1721 * We assume that regset 0 is NT_PRSTATUS. 1722 */ 1723 fill_prstatus(&t->prstatus, t->task, signr); 1724 (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size, 1725 &t->prstatus.pr_reg, NULL); 1726 1727 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, 1728 PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus); 1729 *total += notesize(&t->notes[0]); 1730 1731 do_thread_regset_writeback(t->task, &view->regsets[0]); 1732 1733 /* 1734 * Each other regset might generate a note too. For each regset 1735 * that has no core_note_type or is inactive, we leave t->notes[i] 1736 * all zero and we'll know to skip writing it later. 1737 */ 1738 for (i = 1; i < view->n; ++i) { 1739 const struct user_regset *regset = &view->regsets[i]; 1740 do_thread_regset_writeback(t->task, regset); 1741 if (regset->core_note_type && regset->get && 1742 (!regset->active || regset->active(t->task, regset))) { 1743 int ret; 1744 size_t size = regset_size(t->task, regset); 1745 void *data = kmalloc(size, GFP_KERNEL); 1746 if (unlikely(!data)) 1747 return 0; 1748 ret = regset->get(t->task, regset, 1749 0, size, data, NULL); 1750 if (unlikely(ret)) 1751 kfree(data); 1752 else { 1753 if (regset->core_note_type != NT_PRFPREG) 1754 fill_note(&t->notes[i], "LINUX", 1755 regset->core_note_type, 1756 size, data); 1757 else { 1758 SET_PR_FPVALID(&t->prstatus, 1759 1, regset0_size); 1760 fill_note(&t->notes[i], "CORE", 1761 NT_PRFPREG, size, data); 1762 } 1763 *total += notesize(&t->notes[i]); 1764 } 1765 } 1766 } 1767 1768 return 1; 1769 } 1770 1771 static int fill_note_info(struct elfhdr *elf, int phdrs, 1772 struct elf_note_info *info, 1773 const siginfo_t *siginfo, struct pt_regs *regs) 1774 { 1775 struct task_struct *dump_task = current; 1776 const struct user_regset_view *view = task_user_regset_view(dump_task); 1777 struct elf_thread_core_info *t; 1778 struct elf_prpsinfo *psinfo; 1779 struct core_thread *ct; 1780 unsigned int i; 1781 1782 info->size = 0; 1783 info->thread = NULL; 1784 1785 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); 1786 if (psinfo == NULL) { 1787 info->psinfo.data = NULL; /* So we don't free this wrongly */ 1788 return 0; 1789 } 1790 1791 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); 1792 1793 /* 1794 * Figure out how many notes we're going to need for each thread. 1795 */ 1796 info->thread_notes = 0; 1797 for (i = 0; i < view->n; ++i) 1798 if (view->regsets[i].core_note_type != 0) 1799 ++info->thread_notes; 1800 1801 /* 1802 * Sanity check. We rely on regset 0 being in NT_PRSTATUS, 1803 * since it is our one special case. 
1804 */ 1805 if (unlikely(info->thread_notes == 0) || 1806 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) { 1807 WARN_ON(1); 1808 return 0; 1809 } 1810 1811 /* 1812 * Initialize the ELF file header. 1813 */ 1814 fill_elf_header(elf, phdrs, 1815 view->e_machine, view->e_flags); 1816 1817 /* 1818 * Allocate a structure for each thread. 1819 */ 1820 for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) { 1821 t = kzalloc(offsetof(struct elf_thread_core_info, 1822 notes[info->thread_notes]), 1823 GFP_KERNEL); 1824 if (unlikely(!t)) 1825 return 0; 1826 1827 t->task = ct->task; 1828 if (ct->task == dump_task || !info->thread) { 1829 t->next = info->thread; 1830 info->thread = t; 1831 } else { 1832 /* 1833 * Make sure to keep the original task at 1834 * the head of the list. 1835 */ 1836 t->next = info->thread->next; 1837 info->thread->next = t; 1838 } 1839 } 1840 1841 /* 1842 * Now fill in each thread's information. 1843 */ 1844 for (t = info->thread; t != NULL; t = t->next) 1845 if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size)) 1846 return 0; 1847 1848 /* 1849 * Fill in the two process-wide notes. 1850 */ 1851 fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm); 1852 info->size += notesize(&info->psinfo); 1853 1854 fill_siginfo_note(&info->signote, &info->csigdata, siginfo); 1855 info->size += notesize(&info->signote); 1856 1857 fill_auxv_note(&info->auxv, current->mm); 1858 info->size += notesize(&info->auxv); 1859 1860 if (fill_files_note(&info->files) == 0) 1861 info->size += notesize(&info->files); 1862 1863 return 1; 1864 } 1865 1866 static size_t get_note_info_size(struct elf_note_info *info) 1867 { 1868 return info->size; 1869 } 1870 1871 /* 1872 * Write all the notes for each thread. When writing the first thread, the 1873 * process-wide notes are interleaved after the first thread-specific note. 1874 */ 1875 static int write_note_info(struct elf_note_info *info, 1876 struct coredump_params *cprm) 1877 { 1878 bool first = true; 1879 struct elf_thread_core_info *t = info->thread; 1880 1881 do { 1882 int i; 1883 1884 if (!writenote(&t->notes[0], cprm)) 1885 return 0; 1886 1887 if (first && !writenote(&info->psinfo, cprm)) 1888 return 0; 1889 if (first && !writenote(&info->signote, cprm)) 1890 return 0; 1891 if (first && !writenote(&info->auxv, cprm)) 1892 return 0; 1893 if (first && info->files.data && 1894 !writenote(&info->files, cprm)) 1895 return 0; 1896 1897 for (i = 1; i < info->thread_notes; ++i) 1898 if (t->notes[i].data && 1899 !writenote(&t->notes[i], cprm)) 1900 return 0; 1901 1902 first = false; 1903 t = t->next; 1904 } while (t); 1905 1906 return 1; 1907 } 1908 1909 static void free_note_info(struct elf_note_info *info) 1910 { 1911 struct elf_thread_core_info *threads = info->thread; 1912 while (threads) { 1913 unsigned int i; 1914 struct elf_thread_core_info *t = threads; 1915 threads = t->next; 1916 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus); 1917 for (i = 1; i < info->thread_notes; ++i) 1918 kfree(t->notes[i].data); 1919 kfree(t); 1920 } 1921 kfree(info->psinfo.data); 1922 vfree(info->files.data); 1923 } 1924 1925 #else 1926 1927 /* Here is the structure in which status of each thread is captured. 
/* Here is the structure in which the status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];
	int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}

struct elf_note_info {
	struct memelfnote *notes;
	struct memelfnote *notes_files;
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;
	elf_fpregset_t *fpu;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;
#endif
	user_siginfo_t csigdata;
	int thread_status_size;
	int numnote;
};

static int elf_note_info_init(struct elf_note_info *info)
{
	memset(info, 0, sizeof(*info));
	INIT_LIST_HEAD(&info->thread_list);

	/* Allocate space for ELF notes */
	info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		return 0;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		return 0;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		return 0;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
	if (!info->xfpu)
		return 0;
#endif
	return 1;
}

static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const siginfo_t *siginfo, struct pt_regs *regs)
{
	struct list_head *t;
	struct core_thread *ct;
	struct elf_thread_status *ets;

	if (!elf_note_info_init(info))
		return 0;

	for (ct = current->mm->core_state->dumper.next;
	     ct; ct = ct->next) {
		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
		if (!ets)
			return 0;

		ets->thread = ct->task;
		list_add(&ets->list, &info->thread_list);
	}

	list_for_each(t, &info->thread_list) {
		int sz;

		ets = list_entry(t, struct elf_thread_status, list);
		sz = elf_dump_thread_status(siginfo->si_signo, ets);
		info->thread_status_size += sz;
	}
	/* Now collect the dump for the current task. */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, siginfo->si_signo);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
	fill_auxv_note(info->notes + 3, current->mm);
	info->numnote = 4;

	if (fill_files_note(info->notes + info->numnote) == 0) {
		info->notes_files = info->notes + info->numnote;
		info->numnote++;
	}

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;
}

static size_t get_note_info_size(struct elf_note_info *info)
{
	int sz = 0;
	int i;

	for (i = 0; i < info->numnote; i++)
		sz += notesize(info->notes + i);

	sz += info->thread_status_size;

	return sz;
}

static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	int i;
	struct list_head *t;

	for (i = 0; i < info->numnote; i++)
		if (!writenote(info->notes + i, cprm))
			return 0;

	/* write out the thread status notes section */
	list_for_each(t, &info->thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], cprm))
				return 0;
	}

	return 1;
}

static void free_note_info(struct elf_note_info *info)
{
	while (!list_empty(&info->thread_list)) {
		struct list_head *tmp = info->thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	/* Free data possibly allocated by fill_files_note(): */
	if (info->notes_files)
		vfree(info->notes_files->data);

	kfree(info->prstatus);
	kfree(info->psinfo);
	kfree(info->notes);
	kfree(info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(info->xfpu);
#endif
}

#endif

static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}
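
/*
 * Illustrative sketch only (kept out of the build): the dump loops in
 * elf_core_dump() below always walk the address space with the
 * first_vma()/next_vma() pair, which visits every entry on mm->mmap and
 * then appends the gate VMA (e.g. the x86 vsyscall page), since that
 * mapping is not linked into the ordinary VMA list.  The per-VMA action
 * here, examine(), is a hypothetical stand-in.
 */
#if 0
	for (vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma))
		examine(vma);		/* hypothetical per-VMA work */
#endif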
/*
 * Helper function for iterating across a vma list.  It ensures that the
 * caller will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
				       struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}

static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
			     elf_addr_t e_shoff, int segs)
{
	elf->e_shoff = e_shoff;
	elf->e_shentsize = sizeof(*shdr4extnum);
	elf->e_shnum = 1;
	elf->e_shstrndx = SHN_UNDEF;

	memset(shdr4extnum, 0, sizeof(*shdr4extnum));

	shdr4extnum->sh_type = SHT_NULL;
	shdr4extnum->sh_size = elf->e_shnum;
	shdr4extnum->sh_link = elf->e_shstrndx;
	shdr4extnum->sh_info = segs;
}

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs, i;
	size_t vma_data_size = 0;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff;
	struct elf_note_info info = { };
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;
	elf_addr_t *vma_filesz = NULL;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated.  So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto out;
	/*
	 * The number of segs is recorded in the ELF header as a 16-bit value.
	 * Please check the DEFAULT_MAX_MAP_COUNT definition when you modify
	 * this.
	 */
	segs = current->mm->map_count;
	segs += elf_core_extra_phdrs();

	gate_vma = get_gate_vma(current->mm);
	if (gate_vma != NULL)
		segs++;

	/* for notes section */
	segs++;

	/*
	 * If segs > PN_XNUM(0xffff), then e_phnum overflows.  To avoid
	 * this, the kernel supports extended numbering.  Have a look at
	 * include/linux/elf.h for further information.
	 */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;

	/*
	 * Collect all the non-memory information about the process for the
	 * notes.  This also sets up the file header.
	 */
	if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
		goto cleanup;

	has_dumped = 1;

	fs = get_fs();
	set_fs(KERNEL_DS);

	offset += sizeof(*elf);				/* Elf header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		size_t sz = get_note_info_size(&info);

		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
		goto end_coredump;
	vma_filesz = vmalloc((segs - 1) * sizeof(*vma_filesz));
	if (!vma_filesz)
		goto end_coredump;

	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		unsigned long dump_size;

		dump_size = vma_dump_size(vma, cprm->mm_flags);
		vma_filesz[i++] = dump_size;
		vma_data_size += dump_size;
	}

	offset += vma_data_size;
	offset += elf_core_extra_data_size();
	e_shoff = offset;

	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
	}

	offset = dataoff;

	if (!dump_emit(cprm, elf, sizeof(*elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* Write program headers for segments dump */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_filesz[i++];
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;

	/* write out the notes section */
	if (!write_note_info(&info, cprm))
		goto end_coredump;

	if (elf_coredump_extra_notes_write(cprm))
		goto end_coredump;

	/* Align to page */
	if (!dump_skip(cprm, dataoff - cprm->pos))
		goto end_coredump;

	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_filesz[i++];

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			int stop;

			page = get_dump_page(addr);
			if (page) {
				void *kaddr = kmap(page);
				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
				kunmap(page);
				put_page(page);
			} else
				stop = !dump_skip(cprm, PAGE_SIZE);
			if (stop)
				goto end_coredump;
		}
	}
	dump_truncate(cprm);

	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	set_fs(fs);

cleanup:
	free_note_info(&info);
	kfree(shdr4extnum);
	vfree(vma_filesz);
	kfree(phdr4note);
	kfree(elf);
out:
	return has_dumped;
}

#endif		/* CONFIG_ELF_CORE */

static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");
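
/*
 * For reference, a minimal userspace sketch (not part of the kernel and
 * kept out of the build) of how the PT_NOTE segment written by
 * elf_core_dump() can be walked.  Each note record is an Elf_Nhdr
 * followed by the name and the descriptor, each padded to 4 bytes --
 * the same layout that notesize()/writenote() produce above.  The
 * walk_notes() helper is a hypothetical example, not an existing API.
 */
#if 0
#include <elf.h>
#include <stdio.h>

static void walk_notes(const unsigned char *p, size_t len)
{
	while (len >= sizeof(Elf64_Nhdr)) {
		const Elf64_Nhdr *nh = (const Elf64_Nhdr *)p;
		size_t namesz = (nh->n_namesz + 3) & ~3u;	/* 4-byte pad */
		size_t descsz = (nh->n_descsz + 3) & ~3u;	/* 4-byte pad */
		size_t sz = sizeof(*nh) + namesz + descsz;

		if (sz > len)
			break;
		/* The name follows the header, e.g. "CORE" for NT_PRSTATUS. */
		printf("note %-8s type %u, %u byte(s) of data\n",
		       (const char *)(nh + 1), nh->n_type, nh->n_descsz);
		p += sz;
		len -= sz;
	}
}
#endif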