1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/fs/binfmt_elf.c 4 * 5 * These are the functions used to load ELF format executables as used 6 * on SVr4 machines. Information on the format may be found in the book 7 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support 8 * Tools". 9 * 10 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com). 11 */ 12 13 #include <linux/module.h> 14 #include <linux/kernel.h> 15 #include <linux/fs.h> 16 #include <linux/mm.h> 17 #include <linux/mman.h> 18 #include <linux/errno.h> 19 #include <linux/signal.h> 20 #include <linux/binfmts.h> 21 #include <linux/string.h> 22 #include <linux/file.h> 23 #include <linux/slab.h> 24 #include <linux/personality.h> 25 #include <linux/elfcore.h> 26 #include <linux/init.h> 27 #include <linux/highuid.h> 28 #include <linux/compiler.h> 29 #include <linux/highmem.h> 30 #include <linux/pagemap.h> 31 #include <linux/vmalloc.h> 32 #include <linux/security.h> 33 #include <linux/random.h> 34 #include <linux/elf.h> 35 #include <linux/elf-randomize.h> 36 #include <linux/utsname.h> 37 #include <linux/coredump.h> 38 #include <linux/sched.h> 39 #include <linux/sched/coredump.h> 40 #include <linux/sched/task_stack.h> 41 #include <linux/sched/cputime.h> 42 #include <linux/cred.h> 43 #include <linux/dax.h> 44 #include <linux/uaccess.h> 45 #include <asm/param.h> 46 #include <asm/page.h> 47 48 #ifndef user_long_t 49 #define user_long_t long 50 #endif 51 #ifndef user_siginfo_t 52 #define user_siginfo_t siginfo_t 53 #endif 54 55 /* That's for binfmt_elf_fdpic to deal with */ 56 #ifndef elf_check_fdpic 57 #define elf_check_fdpic(ex) false 58 #endif 59 60 static int load_elf_binary(struct linux_binprm *bprm); 61 62 #ifdef CONFIG_USELIB 63 static int load_elf_library(struct file *); 64 #else 65 #define load_elf_library NULL 66 #endif 67 68 /* 69 * If we don't support core dumping, then supply a NULL so we 70 * don't even try. 71 */ 72 #ifdef CONFIG_ELF_CORE 73 static int elf_core_dump(struct coredump_params *cprm); 74 #else 75 #define elf_core_dump NULL 76 #endif 77 78 #if ELF_EXEC_PAGESIZE > PAGE_SIZE 79 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE 80 #else 81 #define ELF_MIN_ALIGN PAGE_SIZE 82 #endif 83 84 #ifndef ELF_CORE_EFLAGS 85 #define ELF_CORE_EFLAGS 0 86 #endif 87 88 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1)) 89 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1)) 90 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1)) 91 92 static struct linux_binfmt elf_format = { 93 .module = THIS_MODULE, 94 .load_binary = load_elf_binary, 95 .load_shlib = load_elf_library, 96 .core_dump = elf_core_dump, 97 .min_coredump = ELF_EXEC_PAGESIZE, 98 }; 99 100 #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) 101 102 static int set_brk(unsigned long start, unsigned long end, int prot) 103 { 104 start = ELF_PAGEALIGN(start); 105 end = ELF_PAGEALIGN(end); 106 if (end > start) { 107 /* 108 * Map the last of the bss segment. 109 * If the header is requesting these pages to be 110 * executable, honour that (ppc32 needs this). 111 */ 112 int error = vm_brk_flags(start, end - start, 113 prot & PROT_EXEC ? VM_EXEC : 0); 114 if (error) 115 return error; 116 } 117 current->mm->start_brk = current->mm->brk = end; 118 return 0; 119 } 120 121 /* We need to explicitly zero any fractional pages 122 after the data section (i.e. bss). This would 123 contain the junk from the file that should not 124 be in memory 125 */ 126 static int padzero(unsigned long elf_bss) 127 { 128 unsigned long nbyte; 129 130 nbyte = ELF_PAGEOFFSET(elf_bss); 131 if (nbyte) { 132 nbyte = ELF_MIN_ALIGN - nbyte; 133 if (clear_user((void __user *) elf_bss, nbyte)) 134 return -EFAULT; 135 } 136 return 0; 137 } 138 139 /* Let's use some macros to make this stack manipulation a little clearer */ 140 #ifdef CONFIG_STACK_GROWSUP 141 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items)) 142 #define STACK_ROUND(sp, items) \ 143 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL) 144 #define STACK_ALLOC(sp, len) ({ \ 145 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \ 146 old_sp; }) 147 #else 148 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items)) 149 #define STACK_ROUND(sp, items) \ 150 (((unsigned long) (sp - items)) &~ 15UL) 151 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; }) 152 #endif 153 154 #ifndef ELF_BASE_PLATFORM 155 /* 156 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture. 157 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value 158 * will be copied to the user stack in the same manner as AT_PLATFORM. 159 */ 160 #define ELF_BASE_PLATFORM NULL 161 #endif 162 163 static int 164 create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, 165 unsigned long load_addr, unsigned long interp_load_addr) 166 { 167 unsigned long p = bprm->p; 168 int argc = bprm->argc; 169 int envc = bprm->envc; 170 elf_addr_t __user *sp; 171 elf_addr_t __user *u_platform; 172 elf_addr_t __user *u_base_platform; 173 elf_addr_t __user *u_rand_bytes; 174 const char *k_platform = ELF_PLATFORM; 175 const char *k_base_platform = ELF_BASE_PLATFORM; 176 unsigned char k_rand_bytes[16]; 177 int items; 178 elf_addr_t *elf_info; 179 int ei_index = 0; 180 const struct cred *cred = current_cred(); 181 struct vm_area_struct *vma; 182 183 /* 184 * In some cases (e.g. Hyper-Threading), we want to avoid L1 185 * evictions by the processes running on the same package. One 186 * thing we can do is to shuffle the initial stack for them. 187 */ 188 189 p = arch_align_stack(p); 190 191 /* 192 * If this architecture has a platform capability string, copy it 193 * to userspace. In some cases (Sparc), this info is impossible 194 * for userspace to get any other way, in others (i386) it is 195 * merely difficult. 196 */ 197 u_platform = NULL; 198 if (k_platform) { 199 size_t len = strlen(k_platform) + 1; 200 201 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len); 202 if (__copy_to_user(u_platform, k_platform, len)) 203 return -EFAULT; 204 } 205 206 /* 207 * If this architecture has a "base" platform capability 208 * string, copy it to userspace. 209 */ 210 u_base_platform = NULL; 211 if (k_base_platform) { 212 size_t len = strlen(k_base_platform) + 1; 213 214 u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len); 215 if (__copy_to_user(u_base_platform, k_base_platform, len)) 216 return -EFAULT; 217 } 218 219 /* 220 * Generate 16 random bytes for userspace PRNG seeding. 221 */ 222 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes)); 223 u_rand_bytes = (elf_addr_t __user *) 224 STACK_ALLOC(p, sizeof(k_rand_bytes)); 225 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes))) 226 return -EFAULT; 227 228 /* Create the ELF interpreter info */ 229 elf_info = (elf_addr_t *)current->mm->saved_auxv; 230 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */ 231 #define NEW_AUX_ENT(id, val) \ 232 do { \ 233 elf_info[ei_index++] = id; \ 234 elf_info[ei_index++] = val; \ 235 } while (0) 236 237 #ifdef ARCH_DLINFO 238 /* 239 * ARCH_DLINFO must come first so PPC can do its special alignment of 240 * AUXV. 241 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in 242 * ARCH_DLINFO changes 243 */ 244 ARCH_DLINFO; 245 #endif 246 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP); 247 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE); 248 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC); 249 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff); 250 NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr)); 251 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum); 252 NEW_AUX_ENT(AT_BASE, interp_load_addr); 253 NEW_AUX_ENT(AT_FLAGS, 0); 254 NEW_AUX_ENT(AT_ENTRY, exec->e_entry); 255 NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid)); 256 NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid)); 257 NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid)); 258 NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid)); 259 NEW_AUX_ENT(AT_SECURE, bprm->secureexec); 260 NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes); 261 #ifdef ELF_HWCAP2 262 NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2); 263 #endif 264 NEW_AUX_ENT(AT_EXECFN, bprm->exec); 265 if (k_platform) { 266 NEW_AUX_ENT(AT_PLATFORM, 267 (elf_addr_t)(unsigned long)u_platform); 268 } 269 if (k_base_platform) { 270 NEW_AUX_ENT(AT_BASE_PLATFORM, 271 (elf_addr_t)(unsigned long)u_base_platform); 272 } 273 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) { 274 NEW_AUX_ENT(AT_EXECFD, bprm->interp_data); 275 } 276 #undef NEW_AUX_ENT 277 /* AT_NULL is zero; clear the rest too */ 278 memset(&elf_info[ei_index], 0, 279 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]); 280 281 /* And advance past the AT_NULL entry. */ 282 ei_index += 2; 283 284 sp = STACK_ADD(p, ei_index); 285 286 items = (argc + 1) + (envc + 1) + 1; 287 bprm->p = STACK_ROUND(sp, items); 288 289 /* Point sp at the lowest address on the stack */ 290 #ifdef CONFIG_STACK_GROWSUP 291 sp = (elf_addr_t __user *)bprm->p - items - ei_index; 292 bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */ 293 #else 294 sp = (elf_addr_t __user *)bprm->p; 295 #endif 296 297 298 /* 299 * Grow the stack manually; some architectures have a limit on how 300 * far ahead a user-space access may be in order to grow the stack. 301 */ 302 vma = find_extend_vma(current->mm, bprm->p); 303 if (!vma) 304 return -EFAULT; 305 306 /* Now, let's put argc (and argv, envp if appropriate) on the stack */ 307 if (__put_user(argc, sp++)) 308 return -EFAULT; 309 310 /* Populate list of argv pointers back to argv strings. */ 311 p = current->mm->arg_end = current->mm->arg_start; 312 while (argc-- > 0) { 313 size_t len; 314 if (__put_user((elf_addr_t)p, sp++)) 315 return -EFAULT; 316 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN); 317 if (!len || len > MAX_ARG_STRLEN) 318 return -EINVAL; 319 p += len; 320 } 321 if (__put_user(0, sp++)) 322 return -EFAULT; 323 current->mm->arg_end = p; 324 325 /* Populate list of envp pointers back to envp strings. */ 326 current->mm->env_end = current->mm->env_start = p; 327 while (envc-- > 0) { 328 size_t len; 329 if (__put_user((elf_addr_t)p, sp++)) 330 return -EFAULT; 331 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN); 332 if (!len || len > MAX_ARG_STRLEN) 333 return -EINVAL; 334 p += len; 335 } 336 if (__put_user(0, sp++)) 337 return -EFAULT; 338 current->mm->env_end = p; 339 340 /* Put the elf_info on the stack in the right place. */ 341 if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t))) 342 return -EFAULT; 343 return 0; 344 } 345 346 #ifndef elf_map 347 348 static unsigned long elf_map(struct file *filep, unsigned long addr, 349 const struct elf_phdr *eppnt, int prot, int type, 350 unsigned long total_size) 351 { 352 unsigned long map_addr; 353 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr); 354 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr); 355 addr = ELF_PAGESTART(addr); 356 size = ELF_PAGEALIGN(size); 357 358 /* mmap() will return -EINVAL if given a zero size, but a 359 * segment with zero filesize is perfectly valid */ 360 if (!size) 361 return addr; 362 363 /* 364 * total_size is the size of the ELF (interpreter) image. 365 * The _first_ mmap needs to know the full size, otherwise 366 * randomization might put this image into an overlapping 367 * position with the ELF binary image. (since size < total_size) 368 * So we first map the 'big' image - and unmap the remainder at 369 * the end. (which unmap is needed for ELF images with holes.) 370 */ 371 if (total_size) { 372 total_size = ELF_PAGEALIGN(total_size); 373 map_addr = vm_mmap(filep, addr, total_size, prot, type, off); 374 if (!BAD_ADDR(map_addr)) 375 vm_munmap(map_addr+size, total_size-size); 376 } else 377 map_addr = vm_mmap(filep, addr, size, prot, type, off); 378 379 if ((type & MAP_FIXED_NOREPLACE) && 380 PTR_ERR((void *)map_addr) == -EEXIST) 381 pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n", 382 task_pid_nr(current), current->comm, (void *)addr); 383 384 return(map_addr); 385 } 386 387 #endif /* !elf_map */ 388 389 static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr) 390 { 391 int i, first_idx = -1, last_idx = -1; 392 393 for (i = 0; i < nr; i++) { 394 if (cmds[i].p_type == PT_LOAD) { 395 last_idx = i; 396 if (first_idx == -1) 397 first_idx = i; 398 } 399 } 400 if (first_idx == -1) 401 return 0; 402 403 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz - 404 ELF_PAGESTART(cmds[first_idx].p_vaddr); 405 } 406 407 /** 408 * load_elf_phdrs() - load ELF program headers 409 * @elf_ex: ELF header of the binary whose program headers should be loaded 410 * @elf_file: the opened ELF binary file 411 * 412 * Loads ELF program headers from the binary file elf_file, which has the ELF 413 * header pointed to by elf_ex, into a newly allocated array. The caller is 414 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure. 415 */ 416 static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex, 417 struct file *elf_file) 418 { 419 struct elf_phdr *elf_phdata = NULL; 420 int retval, err = -1; 421 loff_t pos = elf_ex->e_phoff; 422 unsigned int size; 423 424 /* 425 * If the size of this structure has changed, then punt, since 426 * we will be doing the wrong thing. 427 */ 428 if (elf_ex->e_phentsize != sizeof(struct elf_phdr)) 429 goto out; 430 431 /* Sanity check the number of program headers... */ 432 /* ...and their total size. */ 433 size = sizeof(struct elf_phdr) * elf_ex->e_phnum; 434 if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN) 435 goto out; 436 437 elf_phdata = kmalloc(size, GFP_KERNEL); 438 if (!elf_phdata) 439 goto out; 440 441 /* Read in the program headers */ 442 retval = kernel_read(elf_file, elf_phdata, size, &pos); 443 if (retval != size) { 444 err = (retval < 0) ? retval : -EIO; 445 goto out; 446 } 447 448 /* Success! */ 449 err = 0; 450 out: 451 if (err) { 452 kfree(elf_phdata); 453 elf_phdata = NULL; 454 } 455 return elf_phdata; 456 } 457 458 #ifndef CONFIG_ARCH_BINFMT_ELF_STATE 459 460 /** 461 * struct arch_elf_state - arch-specific ELF loading state 462 * 463 * This structure is used to preserve architecture specific data during 464 * the loading of an ELF file, throughout the checking of architecture 465 * specific ELF headers & through to the point where the ELF load is 466 * known to be proceeding (ie. SET_PERSONALITY). 467 * 468 * This implementation is a dummy for architectures which require no 469 * specific state. 470 */ 471 struct arch_elf_state { 472 }; 473 474 #define INIT_ARCH_ELF_STATE {} 475 476 /** 477 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header 478 * @ehdr: The main ELF header 479 * @phdr: The program header to check 480 * @elf: The open ELF file 481 * @is_interp: True if the phdr is from the interpreter of the ELF being 482 * loaded, else false. 483 * @state: Architecture-specific state preserved throughout the process 484 * of loading the ELF. 485 * 486 * Inspects the program header phdr to validate its correctness and/or 487 * suitability for the system. Called once per ELF program header in the 488 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its 489 * interpreter. 490 * 491 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load 492 * with that return code. 493 */ 494 static inline int arch_elf_pt_proc(struct elfhdr *ehdr, 495 struct elf_phdr *phdr, 496 struct file *elf, bool is_interp, 497 struct arch_elf_state *state) 498 { 499 /* Dummy implementation, always proceed */ 500 return 0; 501 } 502 503 /** 504 * arch_check_elf() - check an ELF executable 505 * @ehdr: The main ELF header 506 * @has_interp: True if the ELF has an interpreter, else false. 507 * @interp_ehdr: The interpreter's ELF header 508 * @state: Architecture-specific state preserved throughout the process 509 * of loading the ELF. 510 * 511 * Provides a final opportunity for architecture code to reject the loading 512 * of the ELF & cause an exec syscall to return an error. This is called after 513 * all program headers to be checked by arch_elf_pt_proc have been. 514 * 515 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load 516 * with that return code. 517 */ 518 static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp, 519 struct elfhdr *interp_ehdr, 520 struct arch_elf_state *state) 521 { 522 /* Dummy implementation, always proceed */ 523 return 0; 524 } 525 526 #endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */ 527 528 static inline int make_prot(u32 p_flags) 529 { 530 int prot = 0; 531 532 if (p_flags & PF_R) 533 prot |= PROT_READ; 534 if (p_flags & PF_W) 535 prot |= PROT_WRITE; 536 if (p_flags & PF_X) 537 prot |= PROT_EXEC; 538 return prot; 539 } 540 541 /* This is much more generalized than the library routine read function, 542 so we keep this separate. Technically the library read function 543 is only provided so that we can read a.out libraries that have 544 an ELF header */ 545 546 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, 547 struct file *interpreter, unsigned long *interp_map_addr, 548 unsigned long no_base, struct elf_phdr *interp_elf_phdata) 549 { 550 struct elf_phdr *eppnt; 551 unsigned long load_addr = 0; 552 int load_addr_set = 0; 553 unsigned long last_bss = 0, elf_bss = 0; 554 int bss_prot = 0; 555 unsigned long error = ~0UL; 556 unsigned long total_size; 557 int i; 558 559 /* First of all, some simple consistency checks */ 560 if (interp_elf_ex->e_type != ET_EXEC && 561 interp_elf_ex->e_type != ET_DYN) 562 goto out; 563 if (!elf_check_arch(interp_elf_ex) || 564 elf_check_fdpic(interp_elf_ex)) 565 goto out; 566 if (!interpreter->f_op->mmap) 567 goto out; 568 569 total_size = total_mapping_size(interp_elf_phdata, 570 interp_elf_ex->e_phnum); 571 if (!total_size) { 572 error = -EINVAL; 573 goto out; 574 } 575 576 eppnt = interp_elf_phdata; 577 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) { 578 if (eppnt->p_type == PT_LOAD) { 579 int elf_type = MAP_PRIVATE | MAP_DENYWRITE; 580 int elf_prot = make_prot(eppnt->p_flags); 581 unsigned long vaddr = 0; 582 unsigned long k, map_addr; 583 584 vaddr = eppnt->p_vaddr; 585 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) 586 elf_type |= MAP_FIXED_NOREPLACE; 587 else if (no_base && interp_elf_ex->e_type == ET_DYN) 588 load_addr = -vaddr; 589 590 map_addr = elf_map(interpreter, load_addr + vaddr, 591 eppnt, elf_prot, elf_type, total_size); 592 total_size = 0; 593 if (!*interp_map_addr) 594 *interp_map_addr = map_addr; 595 error = map_addr; 596 if (BAD_ADDR(map_addr)) 597 goto out; 598 599 if (!load_addr_set && 600 interp_elf_ex->e_type == ET_DYN) { 601 load_addr = map_addr - ELF_PAGESTART(vaddr); 602 load_addr_set = 1; 603 } 604 605 /* 606 * Check to see if the section's size will overflow the 607 * allowed task size. Note that p_filesz must always be 608 * <= p_memsize so it's only necessary to check p_memsz. 609 */ 610 k = load_addr + eppnt->p_vaddr; 611 if (BAD_ADDR(k) || 612 eppnt->p_filesz > eppnt->p_memsz || 613 eppnt->p_memsz > TASK_SIZE || 614 TASK_SIZE - eppnt->p_memsz < k) { 615 error = -ENOMEM; 616 goto out; 617 } 618 619 /* 620 * Find the end of the file mapping for this phdr, and 621 * keep track of the largest address we see for this. 622 */ 623 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz; 624 if (k > elf_bss) 625 elf_bss = k; 626 627 /* 628 * Do the same thing for the memory mapping - between 629 * elf_bss and last_bss is the bss section. 630 */ 631 k = load_addr + eppnt->p_vaddr + eppnt->p_memsz; 632 if (k > last_bss) { 633 last_bss = k; 634 bss_prot = elf_prot; 635 } 636 } 637 } 638 639 /* 640 * Now fill out the bss section: first pad the last page from 641 * the file up to the page boundary, and zero it from elf_bss 642 * up to the end of the page. 643 */ 644 if (padzero(elf_bss)) { 645 error = -EFAULT; 646 goto out; 647 } 648 /* 649 * Next, align both the file and mem bss up to the page size, 650 * since this is where elf_bss was just zeroed up to, and where 651 * last_bss will end after the vm_brk_flags() below. 652 */ 653 elf_bss = ELF_PAGEALIGN(elf_bss); 654 last_bss = ELF_PAGEALIGN(last_bss); 655 /* Finally, if there is still more bss to allocate, do it. */ 656 if (last_bss > elf_bss) { 657 error = vm_brk_flags(elf_bss, last_bss - elf_bss, 658 bss_prot & PROT_EXEC ? VM_EXEC : 0); 659 if (error) 660 goto out; 661 } 662 663 error = load_addr; 664 out: 665 return error; 666 } 667 668 /* 669 * These are the functions used to load ELF style executables and shared 670 * libraries. There is no binary dependent code anywhere else. 671 */ 672 673 static int load_elf_binary(struct linux_binprm *bprm) 674 { 675 struct file *interpreter = NULL; /* to shut gcc up */ 676 unsigned long load_addr = 0, load_bias = 0; 677 int load_addr_set = 0; 678 unsigned long error; 679 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL; 680 unsigned long elf_bss, elf_brk; 681 int bss_prot = 0; 682 int retval, i; 683 unsigned long elf_entry; 684 unsigned long interp_load_addr = 0; 685 unsigned long start_code, end_code, start_data, end_data; 686 unsigned long reloc_func_desc __maybe_unused = 0; 687 int executable_stack = EXSTACK_DEFAULT; 688 struct { 689 struct elfhdr elf_ex; 690 struct elfhdr interp_elf_ex; 691 } *loc; 692 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE; 693 struct pt_regs *regs; 694 695 loc = kmalloc(sizeof(*loc), GFP_KERNEL); 696 if (!loc) { 697 retval = -ENOMEM; 698 goto out_ret; 699 } 700 701 /* Get the exec-header */ 702 loc->elf_ex = *((struct elfhdr *)bprm->buf); 703 704 retval = -ENOEXEC; 705 /* First of all, some simple consistency checks */ 706 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0) 707 goto out; 708 709 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN) 710 goto out; 711 if (!elf_check_arch(&loc->elf_ex)) 712 goto out; 713 if (elf_check_fdpic(&loc->elf_ex)) 714 goto out; 715 if (!bprm->file->f_op->mmap) 716 goto out; 717 718 elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file); 719 if (!elf_phdata) 720 goto out; 721 722 elf_ppnt = elf_phdata; 723 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { 724 char *elf_interpreter; 725 loff_t pos; 726 727 if (elf_ppnt->p_type != PT_INTERP) 728 continue; 729 730 /* 731 * This is the program interpreter used for shared libraries - 732 * for now assume that this is an a.out format binary. 733 */ 734 retval = -ENOEXEC; 735 if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2) 736 goto out_free_ph; 737 738 retval = -ENOMEM; 739 elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL); 740 if (!elf_interpreter) 741 goto out_free_ph; 742 743 pos = elf_ppnt->p_offset; 744 retval = kernel_read(bprm->file, elf_interpreter, 745 elf_ppnt->p_filesz, &pos); 746 if (retval != elf_ppnt->p_filesz) { 747 if (retval >= 0) 748 retval = -EIO; 749 goto out_free_interp; 750 } 751 /* make sure path is NULL terminated */ 752 retval = -ENOEXEC; 753 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') 754 goto out_free_interp; 755 756 interpreter = open_exec(elf_interpreter); 757 kfree(elf_interpreter); 758 retval = PTR_ERR(interpreter); 759 if (IS_ERR(interpreter)) 760 goto out_free_ph; 761 762 /* 763 * If the binary is not readable then enforce mm->dumpable = 0 764 * regardless of the interpreter's permissions. 765 */ 766 would_dump(bprm, interpreter); 767 768 /* Get the exec headers */ 769 pos = 0; 770 retval = kernel_read(interpreter, &loc->interp_elf_ex, 771 sizeof(loc->interp_elf_ex), &pos); 772 if (retval != sizeof(loc->interp_elf_ex)) { 773 if (retval >= 0) 774 retval = -EIO; 775 goto out_free_dentry; 776 } 777 778 break; 779 780 out_free_interp: 781 kfree(elf_interpreter); 782 goto out_free_ph; 783 } 784 785 elf_ppnt = elf_phdata; 786 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) 787 switch (elf_ppnt->p_type) { 788 case PT_GNU_STACK: 789 if (elf_ppnt->p_flags & PF_X) 790 executable_stack = EXSTACK_ENABLE_X; 791 else 792 executable_stack = EXSTACK_DISABLE_X; 793 break; 794 795 case PT_LOPROC ... PT_HIPROC: 796 retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt, 797 bprm->file, false, 798 &arch_state); 799 if (retval) 800 goto out_free_dentry; 801 break; 802 } 803 804 /* Some simple consistency checks for the interpreter */ 805 if (interpreter) { 806 retval = -ELIBBAD; 807 /* Not an ELF interpreter */ 808 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) 809 goto out_free_dentry; 810 /* Verify the interpreter has a valid arch */ 811 if (!elf_check_arch(&loc->interp_elf_ex) || 812 elf_check_fdpic(&loc->interp_elf_ex)) 813 goto out_free_dentry; 814 815 /* Load the interpreter program headers */ 816 interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex, 817 interpreter); 818 if (!interp_elf_phdata) 819 goto out_free_dentry; 820 821 /* Pass PT_LOPROC..PT_HIPROC headers to arch code */ 822 elf_ppnt = interp_elf_phdata; 823 for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++) 824 switch (elf_ppnt->p_type) { 825 case PT_LOPROC ... PT_HIPROC: 826 retval = arch_elf_pt_proc(&loc->interp_elf_ex, 827 elf_ppnt, interpreter, 828 true, &arch_state); 829 if (retval) 830 goto out_free_dentry; 831 break; 832 } 833 } 834 835 /* 836 * Allow arch code to reject the ELF at this point, whilst it's 837 * still possible to return an error to the code that invoked 838 * the exec syscall. 839 */ 840 retval = arch_check_elf(&loc->elf_ex, 841 !!interpreter, &loc->interp_elf_ex, 842 &arch_state); 843 if (retval) 844 goto out_free_dentry; 845 846 /* Flush all traces of the currently running executable */ 847 retval = flush_old_exec(bprm); 848 if (retval) 849 goto out_free_dentry; 850 851 /* Do this immediately, since STACK_TOP as used in setup_arg_pages 852 may depend on the personality. */ 853 SET_PERSONALITY2(loc->elf_ex, &arch_state); 854 if (elf_read_implies_exec(loc->elf_ex, executable_stack)) 855 current->personality |= READ_IMPLIES_EXEC; 856 857 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 858 current->flags |= PF_RANDOMIZE; 859 860 setup_new_exec(bprm); 861 install_exec_creds(bprm); 862 863 /* Do this so that we can load the interpreter, if need be. We will 864 change some of these later */ 865 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), 866 executable_stack); 867 if (retval < 0) 868 goto out_free_dentry; 869 870 elf_bss = 0; 871 elf_brk = 0; 872 873 start_code = ~0UL; 874 end_code = 0; 875 start_data = 0; 876 end_data = 0; 877 878 /* Now we do a little grungy work by mmapping the ELF image into 879 the correct location in memory. */ 880 for(i = 0, elf_ppnt = elf_phdata; 881 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { 882 int elf_prot, elf_flags, elf_fixed = MAP_FIXED_NOREPLACE; 883 unsigned long k, vaddr; 884 unsigned long total_size = 0; 885 886 if (elf_ppnt->p_type != PT_LOAD) 887 continue; 888 889 if (unlikely (elf_brk > elf_bss)) { 890 unsigned long nbyte; 891 892 /* There was a PT_LOAD segment with p_memsz > p_filesz 893 before this one. Map anonymous pages, if needed, 894 and clear the area. */ 895 retval = set_brk(elf_bss + load_bias, 896 elf_brk + load_bias, 897 bss_prot); 898 if (retval) 899 goto out_free_dentry; 900 nbyte = ELF_PAGEOFFSET(elf_bss); 901 if (nbyte) { 902 nbyte = ELF_MIN_ALIGN - nbyte; 903 if (nbyte > elf_brk - elf_bss) 904 nbyte = elf_brk - elf_bss; 905 if (clear_user((void __user *)elf_bss + 906 load_bias, nbyte)) { 907 /* 908 * This bss-zeroing can fail if the ELF 909 * file specifies odd protections. So 910 * we don't check the return value 911 */ 912 } 913 } 914 915 /* 916 * Some binaries have overlapping elf segments and then 917 * we have to forcefully map over an existing mapping 918 * e.g. over this newly established brk mapping. 919 */ 920 elf_fixed = MAP_FIXED; 921 } 922 923 elf_prot = make_prot(elf_ppnt->p_flags); 924 925 elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE; 926 927 vaddr = elf_ppnt->p_vaddr; 928 /* 929 * If we are loading ET_EXEC or we have already performed 930 * the ET_DYN load_addr calculations, proceed normally. 931 */ 932 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) { 933 elf_flags |= elf_fixed; 934 } else if (loc->elf_ex.e_type == ET_DYN) { 935 /* 936 * This logic is run once for the first LOAD Program 937 * Header for ET_DYN binaries to calculate the 938 * randomization (load_bias) for all the LOAD 939 * Program Headers, and to calculate the entire 940 * size of the ELF mapping (total_size). (Note that 941 * load_addr_set is set to true later once the 942 * initial mapping is performed.) 943 * 944 * There are effectively two types of ET_DYN 945 * binaries: programs (i.e. PIE: ET_DYN with INTERP) 946 * and loaders (ET_DYN without INTERP, since they 947 * _are_ the ELF interpreter). The loaders must 948 * be loaded away from programs since the program 949 * may otherwise collide with the loader (especially 950 * for ET_EXEC which does not have a randomized 951 * position). For example to handle invocations of 952 * "./ld.so someprog" to test out a new version of 953 * the loader, the subsequent program that the 954 * loader loads must avoid the loader itself, so 955 * they cannot share the same load range. Sufficient 956 * room for the brk must be allocated with the 957 * loader as well, since brk must be available with 958 * the loader. 959 * 960 * Therefore, programs are loaded offset from 961 * ELF_ET_DYN_BASE and loaders are loaded into the 962 * independently randomized mmap region (0 load_bias 963 * without MAP_FIXED). 964 */ 965 if (interpreter) { 966 load_bias = ELF_ET_DYN_BASE; 967 if (current->flags & PF_RANDOMIZE) 968 load_bias += arch_mmap_rnd(); 969 elf_flags |= elf_fixed; 970 } else 971 load_bias = 0; 972 973 /* 974 * Since load_bias is used for all subsequent loading 975 * calculations, we must lower it by the first vaddr 976 * so that the remaining calculations based on the 977 * ELF vaddrs will be correctly offset. The result 978 * is then page aligned. 979 */ 980 load_bias = ELF_PAGESTART(load_bias - vaddr); 981 982 total_size = total_mapping_size(elf_phdata, 983 loc->elf_ex.e_phnum); 984 if (!total_size) { 985 retval = -EINVAL; 986 goto out_free_dentry; 987 } 988 } 989 990 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, 991 elf_prot, elf_flags, total_size); 992 if (BAD_ADDR(error)) { 993 retval = IS_ERR((void *)error) ? 994 PTR_ERR((void*)error) : -EINVAL; 995 goto out_free_dentry; 996 } 997 998 if (!load_addr_set) { 999 load_addr_set = 1; 1000 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset); 1001 if (loc->elf_ex.e_type == ET_DYN) { 1002 load_bias += error - 1003 ELF_PAGESTART(load_bias + vaddr); 1004 load_addr += load_bias; 1005 reloc_func_desc = load_bias; 1006 } 1007 } 1008 k = elf_ppnt->p_vaddr; 1009 if (k < start_code) 1010 start_code = k; 1011 if (start_data < k) 1012 start_data = k; 1013 1014 /* 1015 * Check to see if the section's size will overflow the 1016 * allowed task size. Note that p_filesz must always be 1017 * <= p_memsz so it is only necessary to check p_memsz. 1018 */ 1019 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || 1020 elf_ppnt->p_memsz > TASK_SIZE || 1021 TASK_SIZE - elf_ppnt->p_memsz < k) { 1022 /* set_brk can never work. Avoid overflows. */ 1023 retval = -EINVAL; 1024 goto out_free_dentry; 1025 } 1026 1027 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; 1028 1029 if (k > elf_bss) 1030 elf_bss = k; 1031 if ((elf_ppnt->p_flags & PF_X) && end_code < k) 1032 end_code = k; 1033 if (end_data < k) 1034 end_data = k; 1035 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; 1036 if (k > elf_brk) { 1037 bss_prot = elf_prot; 1038 elf_brk = k; 1039 } 1040 } 1041 1042 loc->elf_ex.e_entry += load_bias; 1043 elf_bss += load_bias; 1044 elf_brk += load_bias; 1045 start_code += load_bias; 1046 end_code += load_bias; 1047 start_data += load_bias; 1048 end_data += load_bias; 1049 1050 /* Calling set_brk effectively mmaps the pages that we need 1051 * for the bss and break sections. We must do this before 1052 * mapping in the interpreter, to make sure it doesn't wind 1053 * up getting placed where the bss needs to go. 1054 */ 1055 retval = set_brk(elf_bss, elf_brk, bss_prot); 1056 if (retval) 1057 goto out_free_dentry; 1058 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { 1059 retval = -EFAULT; /* Nobody gets to see this, but.. */ 1060 goto out_free_dentry; 1061 } 1062 1063 if (interpreter) { 1064 unsigned long interp_map_addr = 0; 1065 1066 elf_entry = load_elf_interp(&loc->interp_elf_ex, 1067 interpreter, 1068 &interp_map_addr, 1069 load_bias, interp_elf_phdata); 1070 if (!IS_ERR((void *)elf_entry)) { 1071 /* 1072 * load_elf_interp() returns relocation 1073 * adjustment 1074 */ 1075 interp_load_addr = elf_entry; 1076 elf_entry += loc->interp_elf_ex.e_entry; 1077 } 1078 if (BAD_ADDR(elf_entry)) { 1079 retval = IS_ERR((void *)elf_entry) ? 1080 (int)elf_entry : -EINVAL; 1081 goto out_free_dentry; 1082 } 1083 reloc_func_desc = interp_load_addr; 1084 1085 allow_write_access(interpreter); 1086 fput(interpreter); 1087 } else { 1088 elf_entry = loc->elf_ex.e_entry; 1089 if (BAD_ADDR(elf_entry)) { 1090 retval = -EINVAL; 1091 goto out_free_dentry; 1092 } 1093 } 1094 1095 kfree(interp_elf_phdata); 1096 kfree(elf_phdata); 1097 1098 set_binfmt(&elf_format); 1099 1100 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES 1101 retval = arch_setup_additional_pages(bprm, !!interpreter); 1102 if (retval < 0) 1103 goto out; 1104 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ 1105 1106 retval = create_elf_tables(bprm, &loc->elf_ex, 1107 load_addr, interp_load_addr); 1108 if (retval < 0) 1109 goto out; 1110 current->mm->end_code = end_code; 1111 current->mm->start_code = start_code; 1112 current->mm->start_data = start_data; 1113 current->mm->end_data = end_data; 1114 current->mm->start_stack = bprm->p; 1115 1116 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { 1117 /* 1118 * For architectures with ELF randomization, when executing 1119 * a loader directly (i.e. no interpreter listed in ELF 1120 * headers), move the brk area out of the mmap region 1121 * (since it grows up, and may collide early with the stack 1122 * growing down), and into the unused ELF_ET_DYN_BASE region. 1123 */ 1124 if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && !interpreter) 1125 current->mm->brk = current->mm->start_brk = 1126 ELF_ET_DYN_BASE; 1127 1128 current->mm->brk = current->mm->start_brk = 1129 arch_randomize_brk(current->mm); 1130 #ifdef compat_brk_randomized 1131 current->brk_randomized = 1; 1132 #endif 1133 } 1134 1135 if (current->personality & MMAP_PAGE_ZERO) { 1136 /* Why this, you ask??? Well SVr4 maps page 0 as read-only, 1137 and some applications "depend" upon this behavior. 1138 Since we do not have the power to recompile these, we 1139 emulate the SVr4 behavior. Sigh. */ 1140 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC, 1141 MAP_FIXED | MAP_PRIVATE, 0); 1142 } 1143 1144 regs = current_pt_regs(); 1145 #ifdef ELF_PLAT_INIT 1146 /* 1147 * The ABI may specify that certain registers be set up in special 1148 * ways (on i386 %edx is the address of a DT_FINI function, for 1149 * example. In addition, it may also specify (eg, PowerPC64 ELF) 1150 * that the e_entry field is the address of the function descriptor 1151 * for the startup routine, rather than the address of the startup 1152 * routine itself. This macro performs whatever initialization to 1153 * the regs structure is required as well as any relocations to the 1154 * function descriptor entries when executing dynamically links apps. 1155 */ 1156 ELF_PLAT_INIT(regs, reloc_func_desc); 1157 #endif 1158 1159 finalize_exec(bprm); 1160 start_thread(regs, elf_entry, bprm->p); 1161 retval = 0; 1162 out: 1163 kfree(loc); 1164 out_ret: 1165 return retval; 1166 1167 /* error cleanup */ 1168 out_free_dentry: 1169 kfree(interp_elf_phdata); 1170 allow_write_access(interpreter); 1171 if (interpreter) 1172 fput(interpreter); 1173 out_free_ph: 1174 kfree(elf_phdata); 1175 goto out; 1176 } 1177 1178 #ifdef CONFIG_USELIB 1179 /* This is really simpleminded and specialized - we are loading an 1180 a.out library that is given an ELF header. */ 1181 static int load_elf_library(struct file *file) 1182 { 1183 struct elf_phdr *elf_phdata; 1184 struct elf_phdr *eppnt; 1185 unsigned long elf_bss, bss, len; 1186 int retval, error, i, j; 1187 struct elfhdr elf_ex; 1188 loff_t pos = 0; 1189 1190 error = -ENOEXEC; 1191 retval = kernel_read(file, &elf_ex, sizeof(elf_ex), &pos); 1192 if (retval != sizeof(elf_ex)) 1193 goto out; 1194 1195 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0) 1196 goto out; 1197 1198 /* First of all, some simple consistency checks */ 1199 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || 1200 !elf_check_arch(&elf_ex) || !file->f_op->mmap) 1201 goto out; 1202 if (elf_check_fdpic(&elf_ex)) 1203 goto out; 1204 1205 /* Now read in all of the header information */ 1206 1207 j = sizeof(struct elf_phdr) * elf_ex.e_phnum; 1208 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */ 1209 1210 error = -ENOMEM; 1211 elf_phdata = kmalloc(j, GFP_KERNEL); 1212 if (!elf_phdata) 1213 goto out; 1214 1215 eppnt = elf_phdata; 1216 error = -ENOEXEC; 1217 pos = elf_ex.e_phoff; 1218 retval = kernel_read(file, eppnt, j, &pos); 1219 if (retval != j) 1220 goto out_free_ph; 1221 1222 for (j = 0, i = 0; i<elf_ex.e_phnum; i++) 1223 if ((eppnt + i)->p_type == PT_LOAD) 1224 j++; 1225 if (j != 1) 1226 goto out_free_ph; 1227 1228 while (eppnt->p_type != PT_LOAD) 1229 eppnt++; 1230 1231 /* Now use mmap to map the library into memory. */ 1232 error = vm_mmap(file, 1233 ELF_PAGESTART(eppnt->p_vaddr), 1234 (eppnt->p_filesz + 1235 ELF_PAGEOFFSET(eppnt->p_vaddr)), 1236 PROT_READ | PROT_WRITE | PROT_EXEC, 1237 MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE, 1238 (eppnt->p_offset - 1239 ELF_PAGEOFFSET(eppnt->p_vaddr))); 1240 if (error != ELF_PAGESTART(eppnt->p_vaddr)) 1241 goto out_free_ph; 1242 1243 elf_bss = eppnt->p_vaddr + eppnt->p_filesz; 1244 if (padzero(elf_bss)) { 1245 error = -EFAULT; 1246 goto out_free_ph; 1247 } 1248 1249 len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr); 1250 bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr); 1251 if (bss > len) { 1252 error = vm_brk(len, bss - len); 1253 if (error) 1254 goto out_free_ph; 1255 } 1256 error = 0; 1257 1258 out_free_ph: 1259 kfree(elf_phdata); 1260 out: 1261 return error; 1262 } 1263 #endif /* #ifdef CONFIG_USELIB */ 1264 1265 #ifdef CONFIG_ELF_CORE 1266 /* 1267 * ELF core dumper 1268 * 1269 * Modelled on fs/exec.c:aout_core_dump() 1270 * Jeremy Fitzhardinge <jeremy@sw.oz.au> 1271 */ 1272 1273 /* 1274 * The purpose of always_dump_vma() is to make sure that special kernel mappings 1275 * that are useful for post-mortem analysis are included in every core dump. 1276 * In that way we ensure that the core dump is fully interpretable later 1277 * without matching up the same kernel and hardware config to see what PC values 1278 * meant. These special mappings include - vDSO, vsyscall, and other 1279 * architecture specific mappings 1280 */ 1281 static bool always_dump_vma(struct vm_area_struct *vma) 1282 { 1283 /* Any vsyscall mappings? */ 1284 if (vma == get_gate_vma(vma->vm_mm)) 1285 return true; 1286 1287 /* 1288 * Assume that all vmas with a .name op should always be dumped. 1289 * If this changes, a new vm_ops field can easily be added. 1290 */ 1291 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma)) 1292 return true; 1293 1294 /* 1295 * arch_vma_name() returns non-NULL for special architecture mappings, 1296 * such as vDSO sections. 1297 */ 1298 if (arch_vma_name(vma)) 1299 return true; 1300 1301 return false; 1302 } 1303 1304 /* 1305 * Decide what to dump of a segment, part, all or none. 1306 */ 1307 static unsigned long vma_dump_size(struct vm_area_struct *vma, 1308 unsigned long mm_flags) 1309 { 1310 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type)) 1311 1312 /* always dump the vdso and vsyscall sections */ 1313 if (always_dump_vma(vma)) 1314 goto whole; 1315 1316 if (vma->vm_flags & VM_DONTDUMP) 1317 return 0; 1318 1319 /* support for DAX */ 1320 if (vma_is_dax(vma)) { 1321 if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED)) 1322 goto whole; 1323 if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE)) 1324 goto whole; 1325 return 0; 1326 } 1327 1328 /* Hugetlb memory check */ 1329 if (vma->vm_flags & VM_HUGETLB) { 1330 if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED)) 1331 goto whole; 1332 if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE)) 1333 goto whole; 1334 return 0; 1335 } 1336 1337 /* Do not dump I/O mapped devices or special mappings */ 1338 if (vma->vm_flags & VM_IO) 1339 return 0; 1340 1341 /* By default, dump shared memory if mapped from an anonymous file. */ 1342 if (vma->vm_flags & VM_SHARED) { 1343 if (file_inode(vma->vm_file)->i_nlink == 0 ? 1344 FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED)) 1345 goto whole; 1346 return 0; 1347 } 1348 1349 /* Dump segments that have been written to. */ 1350 if (vma->anon_vma && FILTER(ANON_PRIVATE)) 1351 goto whole; 1352 if (vma->vm_file == NULL) 1353 return 0; 1354 1355 if (FILTER(MAPPED_PRIVATE)) 1356 goto whole; 1357 1358 /* 1359 * If this looks like the beginning of a DSO or executable mapping, 1360 * check for an ELF header. If we find one, dump the first page to 1361 * aid in determining what was mapped here. 1362 */ 1363 if (FILTER(ELF_HEADERS) && 1364 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) { 1365 u32 __user *header = (u32 __user *) vma->vm_start; 1366 u32 word; 1367 mm_segment_t fs = get_fs(); 1368 /* 1369 * Doing it this way gets the constant folded by GCC. 1370 */ 1371 union { 1372 u32 cmp; 1373 char elfmag[SELFMAG]; 1374 } magic; 1375 BUILD_BUG_ON(SELFMAG != sizeof word); 1376 magic.elfmag[EI_MAG0] = ELFMAG0; 1377 magic.elfmag[EI_MAG1] = ELFMAG1; 1378 magic.elfmag[EI_MAG2] = ELFMAG2; 1379 magic.elfmag[EI_MAG3] = ELFMAG3; 1380 /* 1381 * Switch to the user "segment" for get_user(), 1382 * then put back what elf_core_dump() had in place. 1383 */ 1384 set_fs(USER_DS); 1385 if (unlikely(get_user(word, header))) 1386 word = 0; 1387 set_fs(fs); 1388 if (word == magic.cmp) 1389 return PAGE_SIZE; 1390 } 1391 1392 #undef FILTER 1393 1394 return 0; 1395 1396 whole: 1397 return vma->vm_end - vma->vm_start; 1398 } 1399 1400 /* An ELF note in memory */ 1401 struct memelfnote 1402 { 1403 const char *name; 1404 int type; 1405 unsigned int datasz; 1406 void *data; 1407 }; 1408 1409 static int notesize(struct memelfnote *en) 1410 { 1411 int sz; 1412 1413 sz = sizeof(struct elf_note); 1414 sz += roundup(strlen(en->name) + 1, 4); 1415 sz += roundup(en->datasz, 4); 1416 1417 return sz; 1418 } 1419 1420 static int writenote(struct memelfnote *men, struct coredump_params *cprm) 1421 { 1422 struct elf_note en; 1423 en.n_namesz = strlen(men->name) + 1; 1424 en.n_descsz = men->datasz; 1425 en.n_type = men->type; 1426 1427 return dump_emit(cprm, &en, sizeof(en)) && 1428 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) && 1429 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4); 1430 } 1431 1432 static void fill_elf_header(struct elfhdr *elf, int segs, 1433 u16 machine, u32 flags) 1434 { 1435 memset(elf, 0, sizeof(*elf)); 1436 1437 memcpy(elf->e_ident, ELFMAG, SELFMAG); 1438 elf->e_ident[EI_CLASS] = ELF_CLASS; 1439 elf->e_ident[EI_DATA] = ELF_DATA; 1440 elf->e_ident[EI_VERSION] = EV_CURRENT; 1441 elf->e_ident[EI_OSABI] = ELF_OSABI; 1442 1443 elf->e_type = ET_CORE; 1444 elf->e_machine = machine; 1445 elf->e_version = EV_CURRENT; 1446 elf->e_phoff = sizeof(struct elfhdr); 1447 elf->e_flags = flags; 1448 elf->e_ehsize = sizeof(struct elfhdr); 1449 elf->e_phentsize = sizeof(struct elf_phdr); 1450 elf->e_phnum = segs; 1451 } 1452 1453 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset) 1454 { 1455 phdr->p_type = PT_NOTE; 1456 phdr->p_offset = offset; 1457 phdr->p_vaddr = 0; 1458 phdr->p_paddr = 0; 1459 phdr->p_filesz = sz; 1460 phdr->p_memsz = 0; 1461 phdr->p_flags = 0; 1462 phdr->p_align = 0; 1463 } 1464 1465 static void fill_note(struct memelfnote *note, const char *name, int type, 1466 unsigned int sz, void *data) 1467 { 1468 note->name = name; 1469 note->type = type; 1470 note->datasz = sz; 1471 note->data = data; 1472 } 1473 1474 /* 1475 * fill up all the fields in prstatus from the given task struct, except 1476 * registers which need to be filled up separately. 1477 */ 1478 static void fill_prstatus(struct elf_prstatus *prstatus, 1479 struct task_struct *p, long signr) 1480 { 1481 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; 1482 prstatus->pr_sigpend = p->pending.signal.sig[0]; 1483 prstatus->pr_sighold = p->blocked.sig[0]; 1484 rcu_read_lock(); 1485 prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent)); 1486 rcu_read_unlock(); 1487 prstatus->pr_pid = task_pid_vnr(p); 1488 prstatus->pr_pgrp = task_pgrp_vnr(p); 1489 prstatus->pr_sid = task_session_vnr(p); 1490 if (thread_group_leader(p)) { 1491 struct task_cputime cputime; 1492 1493 /* 1494 * This is the record for the group leader. It shows the 1495 * group-wide total, not its individual thread total. 1496 */ 1497 thread_group_cputime(p, &cputime); 1498 prstatus->pr_utime = ns_to_timeval(cputime.utime); 1499 prstatus->pr_stime = ns_to_timeval(cputime.stime); 1500 } else { 1501 u64 utime, stime; 1502 1503 task_cputime(p, &utime, &stime); 1504 prstatus->pr_utime = ns_to_timeval(utime); 1505 prstatus->pr_stime = ns_to_timeval(stime); 1506 } 1507 1508 prstatus->pr_cutime = ns_to_timeval(p->signal->cutime); 1509 prstatus->pr_cstime = ns_to_timeval(p->signal->cstime); 1510 } 1511 1512 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, 1513 struct mm_struct *mm) 1514 { 1515 const struct cred *cred; 1516 unsigned int i, len; 1517 1518 /* first copy the parameters from user space */ 1519 memset(psinfo, 0, sizeof(struct elf_prpsinfo)); 1520 1521 len = mm->arg_end - mm->arg_start; 1522 if (len >= ELF_PRARGSZ) 1523 len = ELF_PRARGSZ-1; 1524 if (copy_from_user(&psinfo->pr_psargs, 1525 (const char __user *)mm->arg_start, len)) 1526 return -EFAULT; 1527 for(i = 0; i < len; i++) 1528 if (psinfo->pr_psargs[i] == 0) 1529 psinfo->pr_psargs[i] = ' '; 1530 psinfo->pr_psargs[len] = 0; 1531 1532 rcu_read_lock(); 1533 psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent)); 1534 rcu_read_unlock(); 1535 psinfo->pr_pid = task_pid_vnr(p); 1536 psinfo->pr_pgrp = task_pgrp_vnr(p); 1537 psinfo->pr_sid = task_session_vnr(p); 1538 1539 i = p->state ? ffz(~p->state) + 1 : 0; 1540 psinfo->pr_state = i; 1541 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i]; 1542 psinfo->pr_zomb = psinfo->pr_sname == 'Z'; 1543 psinfo->pr_nice = task_nice(p); 1544 psinfo->pr_flag = p->flags; 1545 rcu_read_lock(); 1546 cred = __task_cred(p); 1547 SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid)); 1548 SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid)); 1549 rcu_read_unlock(); 1550 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname)); 1551 1552 return 0; 1553 } 1554 1555 static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm) 1556 { 1557 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv; 1558 int i = 0; 1559 do 1560 i += 2; 1561 while (auxv[i - 2] != AT_NULL); 1562 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv); 1563 } 1564 1565 static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata, 1566 const kernel_siginfo_t *siginfo) 1567 { 1568 mm_segment_t old_fs = get_fs(); 1569 set_fs(KERNEL_DS); 1570 copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo); 1571 set_fs(old_fs); 1572 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata); 1573 } 1574 1575 #define MAX_FILE_NOTE_SIZE (4*1024*1024) 1576 /* 1577 * Format of NT_FILE note: 1578 * 1579 * long count -- how many files are mapped 1580 * long page_size -- units for file_ofs 1581 * array of [COUNT] elements of 1582 * long start 1583 * long end 1584 * long file_ofs 1585 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL... 1586 */ 1587 static int fill_files_note(struct memelfnote *note) 1588 { 1589 struct vm_area_struct *vma; 1590 unsigned count, size, names_ofs, remaining, n; 1591 user_long_t *data; 1592 user_long_t *start_end_ofs; 1593 char *name_base, *name_curpos; 1594 1595 /* *Estimated* file count and total data size needed */ 1596 count = current->mm->map_count; 1597 if (count > UINT_MAX / 64) 1598 return -EINVAL; 1599 size = count * 64; 1600 1601 names_ofs = (2 + 3 * count) * sizeof(data[0]); 1602 alloc: 1603 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */ 1604 return -EINVAL; 1605 size = round_up(size, PAGE_SIZE); 1606 data = kvmalloc(size, GFP_KERNEL); 1607 if (ZERO_OR_NULL_PTR(data)) 1608 return -ENOMEM; 1609 1610 start_end_ofs = data + 2; 1611 name_base = name_curpos = ((char *)data) + names_ofs; 1612 remaining = size - names_ofs; 1613 count = 0; 1614 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { 1615 struct file *file; 1616 const char *filename; 1617 1618 file = vma->vm_file; 1619 if (!file) 1620 continue; 1621 filename = file_path(file, name_curpos, remaining); 1622 if (IS_ERR(filename)) { 1623 if (PTR_ERR(filename) == -ENAMETOOLONG) { 1624 kvfree(data); 1625 size = size * 5 / 4; 1626 goto alloc; 1627 } 1628 continue; 1629 } 1630 1631 /* file_path() fills at the end, move name down */ 1632 /* n = strlen(filename) + 1: */ 1633 n = (name_curpos + remaining) - filename; 1634 remaining = filename - name_curpos; 1635 memmove(name_curpos, filename, n); 1636 name_curpos += n; 1637 1638 *start_end_ofs++ = vma->vm_start; 1639 *start_end_ofs++ = vma->vm_end; 1640 *start_end_ofs++ = vma->vm_pgoff; 1641 count++; 1642 } 1643 1644 /* Now we know exact count of files, can store it */ 1645 data[0] = count; 1646 data[1] = PAGE_SIZE; 1647 /* 1648 * Count usually is less than current->mm->map_count, 1649 * we need to move filenames down. 1650 */ 1651 n = current->mm->map_count - count; 1652 if (n != 0) { 1653 unsigned shift_bytes = n * 3 * sizeof(data[0]); 1654 memmove(name_base - shift_bytes, name_base, 1655 name_curpos - name_base); 1656 name_curpos -= shift_bytes; 1657 } 1658 1659 size = name_curpos - (char *)data; 1660 fill_note(note, "CORE", NT_FILE, size, data); 1661 return 0; 1662 } 1663 1664 #ifdef CORE_DUMP_USE_REGSET 1665 #include <linux/regset.h> 1666 1667 struct elf_thread_core_info { 1668 struct elf_thread_core_info *next; 1669 struct task_struct *task; 1670 struct elf_prstatus prstatus; 1671 struct memelfnote notes[0]; 1672 }; 1673 1674 struct elf_note_info { 1675 struct elf_thread_core_info *thread; 1676 struct memelfnote psinfo; 1677 struct memelfnote signote; 1678 struct memelfnote auxv; 1679 struct memelfnote files; 1680 user_siginfo_t csigdata; 1681 size_t size; 1682 int thread_notes; 1683 }; 1684 1685 /* 1686 * When a regset has a writeback hook, we call it on each thread before 1687 * dumping user memory. On register window machines, this makes sure the 1688 * user memory backing the register data is up to date before we read it. 1689 */ 1690 static void do_thread_regset_writeback(struct task_struct *task, 1691 const struct user_regset *regset) 1692 { 1693 if (regset->writeback) 1694 regset->writeback(task, regset, 1); 1695 } 1696 1697 #ifndef PRSTATUS_SIZE 1698 #define PRSTATUS_SIZE(S, R) sizeof(S) 1699 #endif 1700 1701 #ifndef SET_PR_FPVALID 1702 #define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V)) 1703 #endif 1704 1705 static int fill_thread_core_info(struct elf_thread_core_info *t, 1706 const struct user_regset_view *view, 1707 long signr, size_t *total) 1708 { 1709 unsigned int i; 1710 unsigned int regset0_size = regset_size(t->task, &view->regsets[0]); 1711 1712 /* 1713 * NT_PRSTATUS is the one special case, because the regset data 1714 * goes into the pr_reg field inside the note contents, rather 1715 * than being the whole note contents. We fill the reset in here. 1716 * We assume that regset 0 is NT_PRSTATUS. 1717 */ 1718 fill_prstatus(&t->prstatus, t->task, signr); 1719 (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size, 1720 &t->prstatus.pr_reg, NULL); 1721 1722 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, 1723 PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus); 1724 *total += notesize(&t->notes[0]); 1725 1726 do_thread_regset_writeback(t->task, &view->regsets[0]); 1727 1728 /* 1729 * Each other regset might generate a note too. For each regset 1730 * that has no core_note_type or is inactive, we leave t->notes[i] 1731 * all zero and we'll know to skip writing it later. 1732 */ 1733 for (i = 1; i < view->n; ++i) { 1734 const struct user_regset *regset = &view->regsets[i]; 1735 do_thread_regset_writeback(t->task, regset); 1736 if (regset->core_note_type && regset->get && 1737 (!regset->active || regset->active(t->task, regset) > 0)) { 1738 int ret; 1739 size_t size = regset_size(t->task, regset); 1740 void *data = kmalloc(size, GFP_KERNEL); 1741 if (unlikely(!data)) 1742 return 0; 1743 ret = regset->get(t->task, regset, 1744 0, size, data, NULL); 1745 if (unlikely(ret)) 1746 kfree(data); 1747 else { 1748 if (regset->core_note_type != NT_PRFPREG) 1749 fill_note(&t->notes[i], "LINUX", 1750 regset->core_note_type, 1751 size, data); 1752 else { 1753 SET_PR_FPVALID(&t->prstatus, 1754 1, regset0_size); 1755 fill_note(&t->notes[i], "CORE", 1756 NT_PRFPREG, size, data); 1757 } 1758 *total += notesize(&t->notes[i]); 1759 } 1760 } 1761 } 1762 1763 return 1; 1764 } 1765 1766 static int fill_note_info(struct elfhdr *elf, int phdrs, 1767 struct elf_note_info *info, 1768 const kernel_siginfo_t *siginfo, struct pt_regs *regs) 1769 { 1770 struct task_struct *dump_task = current; 1771 const struct user_regset_view *view = task_user_regset_view(dump_task); 1772 struct elf_thread_core_info *t; 1773 struct elf_prpsinfo *psinfo; 1774 struct core_thread *ct; 1775 unsigned int i; 1776 1777 info->size = 0; 1778 info->thread = NULL; 1779 1780 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); 1781 if (psinfo == NULL) { 1782 info->psinfo.data = NULL; /* So we don't free this wrongly */ 1783 return 0; 1784 } 1785 1786 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); 1787 1788 /* 1789 * Figure out how many notes we're going to need for each thread. 1790 */ 1791 info->thread_notes = 0; 1792 for (i = 0; i < view->n; ++i) 1793 if (view->regsets[i].core_note_type != 0) 1794 ++info->thread_notes; 1795 1796 /* 1797 * Sanity check. We rely on regset 0 being in NT_PRSTATUS, 1798 * since it is our one special case. 1799 */ 1800 if (unlikely(info->thread_notes == 0) || 1801 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) { 1802 WARN_ON(1); 1803 return 0; 1804 } 1805 1806 /* 1807 * Initialize the ELF file header. 1808 */ 1809 fill_elf_header(elf, phdrs, 1810 view->e_machine, view->e_flags); 1811 1812 /* 1813 * Allocate a structure for each thread. 1814 */ 1815 for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) { 1816 t = kzalloc(offsetof(struct elf_thread_core_info, 1817 notes[info->thread_notes]), 1818 GFP_KERNEL); 1819 if (unlikely(!t)) 1820 return 0; 1821 1822 t->task = ct->task; 1823 if (ct->task == dump_task || !info->thread) { 1824 t->next = info->thread; 1825 info->thread = t; 1826 } else { 1827 /* 1828 * Make sure to keep the original task at 1829 * the head of the list. 1830 */ 1831 t->next = info->thread->next; 1832 info->thread->next = t; 1833 } 1834 } 1835 1836 /* 1837 * Now fill in each thread's information. 1838 */ 1839 for (t = info->thread; t != NULL; t = t->next) 1840 if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size)) 1841 return 0; 1842 1843 /* 1844 * Fill in the two process-wide notes. 1845 */ 1846 fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm); 1847 info->size += notesize(&info->psinfo); 1848 1849 fill_siginfo_note(&info->signote, &info->csigdata, siginfo); 1850 info->size += notesize(&info->signote); 1851 1852 fill_auxv_note(&info->auxv, current->mm); 1853 info->size += notesize(&info->auxv); 1854 1855 if (fill_files_note(&info->files) == 0) 1856 info->size += notesize(&info->files); 1857 1858 return 1; 1859 } 1860 1861 static size_t get_note_info_size(struct elf_note_info *info) 1862 { 1863 return info->size; 1864 } 1865 1866 /* 1867 * Write all the notes for each thread. When writing the first thread, the 1868 * process-wide notes are interleaved after the first thread-specific note. 1869 */ 1870 static int write_note_info(struct elf_note_info *info, 1871 struct coredump_params *cprm) 1872 { 1873 bool first = true; 1874 struct elf_thread_core_info *t = info->thread; 1875 1876 do { 1877 int i; 1878 1879 if (!writenote(&t->notes[0], cprm)) 1880 return 0; 1881 1882 if (first && !writenote(&info->psinfo, cprm)) 1883 return 0; 1884 if (first && !writenote(&info->signote, cprm)) 1885 return 0; 1886 if (first && !writenote(&info->auxv, cprm)) 1887 return 0; 1888 if (first && info->files.data && 1889 !writenote(&info->files, cprm)) 1890 return 0; 1891 1892 for (i = 1; i < info->thread_notes; ++i) 1893 if (t->notes[i].data && 1894 !writenote(&t->notes[i], cprm)) 1895 return 0; 1896 1897 first = false; 1898 t = t->next; 1899 } while (t); 1900 1901 return 1; 1902 } 1903 1904 static void free_note_info(struct elf_note_info *info) 1905 { 1906 struct elf_thread_core_info *threads = info->thread; 1907 while (threads) { 1908 unsigned int i; 1909 struct elf_thread_core_info *t = threads; 1910 threads = t->next; 1911 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus); 1912 for (i = 1; i < info->thread_notes; ++i) 1913 kfree(t->notes[i].data); 1914 kfree(t); 1915 } 1916 kfree(info->psinfo.data); 1917 kvfree(info->files.data); 1918 } 1919 1920 #else 1921 1922 /* Here is the structure in which status of each thread is captured. */ 1923 struct elf_thread_status 1924 { 1925 struct list_head list; 1926 struct elf_prstatus prstatus; /* NT_PRSTATUS */ 1927 elf_fpregset_t fpu; /* NT_PRFPREG */ 1928 struct task_struct *thread; 1929 #ifdef ELF_CORE_COPY_XFPREGS 1930 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */ 1931 #endif 1932 struct memelfnote notes[3]; 1933 int num_notes; 1934 }; 1935 1936 /* 1937 * In order to add the specific thread information for the elf file format, 1938 * we need to keep a linked list of every threads pr_status and then create 1939 * a single section for them in the final core file. 1940 */ 1941 static int elf_dump_thread_status(long signr, struct elf_thread_status *t) 1942 { 1943 int sz = 0; 1944 struct task_struct *p = t->thread; 1945 t->num_notes = 0; 1946 1947 fill_prstatus(&t->prstatus, p, signr); 1948 elf_core_copy_task_regs(p, &t->prstatus.pr_reg); 1949 1950 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), 1951 &(t->prstatus)); 1952 t->num_notes++; 1953 sz += notesize(&t->notes[0]); 1954 1955 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, 1956 &t->fpu))) { 1957 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), 1958 &(t->fpu)); 1959 t->num_notes++; 1960 sz += notesize(&t->notes[1]); 1961 } 1962 1963 #ifdef ELF_CORE_COPY_XFPREGS 1964 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) { 1965 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE, 1966 sizeof(t->xfpu), &t->xfpu); 1967 t->num_notes++; 1968 sz += notesize(&t->notes[2]); 1969 } 1970 #endif 1971 return sz; 1972 } 1973 1974 struct elf_note_info { 1975 struct memelfnote *notes; 1976 struct memelfnote *notes_files; 1977 struct elf_prstatus *prstatus; /* NT_PRSTATUS */ 1978 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */ 1979 struct list_head thread_list; 1980 elf_fpregset_t *fpu; 1981 #ifdef ELF_CORE_COPY_XFPREGS 1982 elf_fpxregset_t *xfpu; 1983 #endif 1984 user_siginfo_t csigdata; 1985 int thread_status_size; 1986 int numnote; 1987 }; 1988 1989 static int elf_note_info_init(struct elf_note_info *info) 1990 { 1991 memset(info, 0, sizeof(*info)); 1992 INIT_LIST_HEAD(&info->thread_list); 1993 1994 /* Allocate space for ELF notes */ 1995 info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL); 1996 if (!info->notes) 1997 return 0; 1998 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL); 1999 if (!info->psinfo) 2000 return 0; 2001 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL); 2002 if (!info->prstatus) 2003 return 0; 2004 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL); 2005 if (!info->fpu) 2006 return 0; 2007 #ifdef ELF_CORE_COPY_XFPREGS 2008 info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL); 2009 if (!info->xfpu) 2010 return 0; 2011 #endif 2012 return 1; 2013 } 2014 2015 static int fill_note_info(struct elfhdr *elf, int phdrs, 2016 struct elf_note_info *info, 2017 const kernel_siginfo_t *siginfo, struct pt_regs *regs) 2018 { 2019 struct core_thread *ct; 2020 struct elf_thread_status *ets; 2021 2022 if (!elf_note_info_init(info)) 2023 return 0; 2024 2025 for (ct = current->mm->core_state->dumper.next; 2026 ct; ct = ct->next) { 2027 ets = kzalloc(sizeof(*ets), GFP_KERNEL); 2028 if (!ets) 2029 return 0; 2030 2031 ets->thread = ct->task; 2032 list_add(&ets->list, &info->thread_list); 2033 } 2034 2035 list_for_each_entry(ets, &info->thread_list, list) { 2036 int sz; 2037 2038 sz = elf_dump_thread_status(siginfo->si_signo, ets); 2039 info->thread_status_size += sz; 2040 } 2041 /* now collect the dump for the current */ 2042 memset(info->prstatus, 0, sizeof(*info->prstatus)); 2043 fill_prstatus(info->prstatus, current, siginfo->si_signo); 2044 elf_core_copy_regs(&info->prstatus->pr_reg, regs); 2045 2046 /* Set up header */ 2047 fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS); 2048 2049 /* 2050 * Set up the notes in similar form to SVR4 core dumps made 2051 * with info from their /proc. 2052 */ 2053 2054 fill_note(info->notes + 0, "CORE", NT_PRSTATUS, 2055 sizeof(*info->prstatus), info->prstatus); 2056 fill_psinfo(info->psinfo, current->group_leader, current->mm); 2057 fill_note(info->notes + 1, "CORE", NT_PRPSINFO, 2058 sizeof(*info->psinfo), info->psinfo); 2059 2060 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo); 2061 fill_auxv_note(info->notes + 3, current->mm); 2062 info->numnote = 4; 2063 2064 if (fill_files_note(info->notes + info->numnote) == 0) { 2065 info->notes_files = info->notes + info->numnote; 2066 info->numnote++; 2067 } 2068 2069 /* Try to dump the FPU. */ 2070 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, 2071 info->fpu); 2072 if (info->prstatus->pr_fpvalid) 2073 fill_note(info->notes + info->numnote++, 2074 "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu); 2075 #ifdef ELF_CORE_COPY_XFPREGS 2076 if (elf_core_copy_task_xfpregs(current, info->xfpu)) 2077 fill_note(info->notes + info->numnote++, 2078 "LINUX", ELF_CORE_XFPREG_TYPE, 2079 sizeof(*info->xfpu), info->xfpu); 2080 #endif 2081 2082 return 1; 2083 } 2084 2085 static size_t get_note_info_size(struct elf_note_info *info) 2086 { 2087 int sz = 0; 2088 int i; 2089 2090 for (i = 0; i < info->numnote; i++) 2091 sz += notesize(info->notes + i); 2092 2093 sz += info->thread_status_size; 2094 2095 return sz; 2096 } 2097 2098 static int write_note_info(struct elf_note_info *info, 2099 struct coredump_params *cprm) 2100 { 2101 struct elf_thread_status *ets; 2102 int i; 2103 2104 for (i = 0; i < info->numnote; i++) 2105 if (!writenote(info->notes + i, cprm)) 2106 return 0; 2107 2108 /* write out the thread status notes section */ 2109 list_for_each_entry(ets, &info->thread_list, list) { 2110 for (i = 0; i < ets->num_notes; i++) 2111 if (!writenote(&ets->notes[i], cprm)) 2112 return 0; 2113 } 2114 2115 return 1; 2116 } 2117 2118 static void free_note_info(struct elf_note_info *info) 2119 { 2120 while (!list_empty(&info->thread_list)) { 2121 struct list_head *tmp = info->thread_list.next; 2122 list_del(tmp); 2123 kfree(list_entry(tmp, struct elf_thread_status, list)); 2124 } 2125 2126 /* Free data possibly allocated by fill_files_note(): */ 2127 if (info->notes_files) 2128 kvfree(info->notes_files->data); 2129 2130 kfree(info->prstatus); 2131 kfree(info->psinfo); 2132 kfree(info->notes); 2133 kfree(info->fpu); 2134 #ifdef ELF_CORE_COPY_XFPREGS 2135 kfree(info->xfpu); 2136 #endif 2137 } 2138 2139 #endif 2140 2141 static struct vm_area_struct *first_vma(struct task_struct *tsk, 2142 struct vm_area_struct *gate_vma) 2143 { 2144 struct vm_area_struct *ret = tsk->mm->mmap; 2145 2146 if (ret) 2147 return ret; 2148 return gate_vma; 2149 } 2150 /* 2151 * Helper function for iterating across a vma list. It ensures that the caller 2152 * will visit `gate_vma' prior to terminating the search. 2153 */ 2154 static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, 2155 struct vm_area_struct *gate_vma) 2156 { 2157 struct vm_area_struct *ret; 2158 2159 ret = this_vma->vm_next; 2160 if (ret) 2161 return ret; 2162 if (this_vma == gate_vma) 2163 return NULL; 2164 return gate_vma; 2165 } 2166 2167 static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum, 2168 elf_addr_t e_shoff, int segs) 2169 { 2170 elf->e_shoff = e_shoff; 2171 elf->e_shentsize = sizeof(*shdr4extnum); 2172 elf->e_shnum = 1; 2173 elf->e_shstrndx = SHN_UNDEF; 2174 2175 memset(shdr4extnum, 0, sizeof(*shdr4extnum)); 2176 2177 shdr4extnum->sh_type = SHT_NULL; 2178 shdr4extnum->sh_size = elf->e_shnum; 2179 shdr4extnum->sh_link = elf->e_shstrndx; 2180 shdr4extnum->sh_info = segs; 2181 } 2182 2183 /* 2184 * Actual dumper 2185 * 2186 * This is a two-pass process; first we find the offsets of the bits, 2187 * and then they are actually written out. If we run out of core limit 2188 * we just truncate. 2189 */ 2190 static int elf_core_dump(struct coredump_params *cprm) 2191 { 2192 int has_dumped = 0; 2193 mm_segment_t fs; 2194 int segs, i; 2195 size_t vma_data_size = 0; 2196 struct vm_area_struct *vma, *gate_vma; 2197 struct elfhdr *elf = NULL; 2198 loff_t offset = 0, dataoff; 2199 struct elf_note_info info = { }; 2200 struct elf_phdr *phdr4note = NULL; 2201 struct elf_shdr *shdr4extnum = NULL; 2202 Elf_Half e_phnum; 2203 elf_addr_t e_shoff; 2204 elf_addr_t *vma_filesz = NULL; 2205 2206 /* 2207 * We no longer stop all VM operations. 2208 * 2209 * This is because those proceses that could possibly change map_count 2210 * or the mmap / vma pages are now blocked in do_exit on current 2211 * finishing this core dump. 2212 * 2213 * Only ptrace can touch these memory addresses, but it doesn't change 2214 * the map_count or the pages allocated. So no possibility of crashing 2215 * exists while dumping the mm->vm_next areas to the core file. 2216 */ 2217 2218 /* alloc memory for large data structures: too large to be on stack */ 2219 elf = kmalloc(sizeof(*elf), GFP_KERNEL); 2220 if (!elf) 2221 goto out; 2222 /* 2223 * The number of segs are recored into ELF header as 16bit value. 2224 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here. 2225 */ 2226 segs = current->mm->map_count; 2227 segs += elf_core_extra_phdrs(); 2228 2229 gate_vma = get_gate_vma(current->mm); 2230 if (gate_vma != NULL) 2231 segs++; 2232 2233 /* for notes section */ 2234 segs++; 2235 2236 /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid 2237 * this, kernel supports extended numbering. Have a look at 2238 * include/linux/elf.h for further information. */ 2239 e_phnum = segs > PN_XNUM ? PN_XNUM : segs; 2240 2241 /* 2242 * Collect all the non-memory information about the process for the 2243 * notes. This also sets up the file header. 2244 */ 2245 if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs)) 2246 goto cleanup; 2247 2248 has_dumped = 1; 2249 2250 fs = get_fs(); 2251 set_fs(KERNEL_DS); 2252 2253 offset += sizeof(*elf); /* Elf header */ 2254 offset += segs * sizeof(struct elf_phdr); /* Program headers */ 2255 2256 /* Write notes phdr entry */ 2257 { 2258 size_t sz = get_note_info_size(&info); 2259 2260 sz += elf_coredump_extra_notes_size(); 2261 2262 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL); 2263 if (!phdr4note) 2264 goto end_coredump; 2265 2266 fill_elf_note_phdr(phdr4note, sz, offset); 2267 offset += sz; 2268 } 2269 2270 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); 2271 2272 if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz)) 2273 goto end_coredump; 2274 vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)), 2275 GFP_KERNEL); 2276 if (ZERO_OR_NULL_PTR(vma_filesz)) 2277 goto end_coredump; 2278 2279 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; 2280 vma = next_vma(vma, gate_vma)) { 2281 unsigned long dump_size; 2282 2283 dump_size = vma_dump_size(vma, cprm->mm_flags); 2284 vma_filesz[i++] = dump_size; 2285 vma_data_size += dump_size; 2286 } 2287 2288 offset += vma_data_size; 2289 offset += elf_core_extra_data_size(); 2290 e_shoff = offset; 2291 2292 if (e_phnum == PN_XNUM) { 2293 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL); 2294 if (!shdr4extnum) 2295 goto end_coredump; 2296 fill_extnum_info(elf, shdr4extnum, e_shoff, segs); 2297 } 2298 2299 offset = dataoff; 2300 2301 if (!dump_emit(cprm, elf, sizeof(*elf))) 2302 goto end_coredump; 2303 2304 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note))) 2305 goto end_coredump; 2306 2307 /* Write program headers for segments dump */ 2308 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; 2309 vma = next_vma(vma, gate_vma)) { 2310 struct elf_phdr phdr; 2311 2312 phdr.p_type = PT_LOAD; 2313 phdr.p_offset = offset; 2314 phdr.p_vaddr = vma->vm_start; 2315 phdr.p_paddr = 0; 2316 phdr.p_filesz = vma_filesz[i++]; 2317 phdr.p_memsz = vma->vm_end - vma->vm_start; 2318 offset += phdr.p_filesz; 2319 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; 2320 if (vma->vm_flags & VM_WRITE) 2321 phdr.p_flags |= PF_W; 2322 if (vma->vm_flags & VM_EXEC) 2323 phdr.p_flags |= PF_X; 2324 phdr.p_align = ELF_EXEC_PAGESIZE; 2325 2326 if (!dump_emit(cprm, &phdr, sizeof(phdr))) 2327 goto end_coredump; 2328 } 2329 2330 if (!elf_core_write_extra_phdrs(cprm, offset)) 2331 goto end_coredump; 2332 2333 /* write out the notes section */ 2334 if (!write_note_info(&info, cprm)) 2335 goto end_coredump; 2336 2337 if (elf_coredump_extra_notes_write(cprm)) 2338 goto end_coredump; 2339 2340 /* Align to page */ 2341 if (!dump_skip(cprm, dataoff - cprm->pos)) 2342 goto end_coredump; 2343 2344 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; 2345 vma = next_vma(vma, gate_vma)) { 2346 unsigned long addr; 2347 unsigned long end; 2348 2349 end = vma->vm_start + vma_filesz[i++]; 2350 2351 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { 2352 struct page *page; 2353 int stop; 2354 2355 page = get_dump_page(addr); 2356 if (page) { 2357 void *kaddr = kmap(page); 2358 stop = !dump_emit(cprm, kaddr, PAGE_SIZE); 2359 kunmap(page); 2360 put_page(page); 2361 } else 2362 stop = !dump_skip(cprm, PAGE_SIZE); 2363 if (stop) 2364 goto end_coredump; 2365 } 2366 } 2367 dump_truncate(cprm); 2368 2369 if (!elf_core_write_extra_data(cprm)) 2370 goto end_coredump; 2371 2372 if (e_phnum == PN_XNUM) { 2373 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum))) 2374 goto end_coredump; 2375 } 2376 2377 end_coredump: 2378 set_fs(fs); 2379 2380 cleanup: 2381 free_note_info(&info); 2382 kfree(shdr4extnum); 2383 kvfree(vma_filesz); 2384 kfree(phdr4note); 2385 kfree(elf); 2386 out: 2387 return has_dumped; 2388 } 2389 2390 #endif /* CONFIG_ELF_CORE */ 2391 2392 static int __init init_elf_binfmt(void) 2393 { 2394 register_binfmt(&elf_format); 2395 return 0; 2396 } 2397 2398 static void __exit exit_elf_binfmt(void) 2399 { 2400 /* Remove the COFF and ELF loaders. */ 2401 unregister_binfmt(&elf_format); 2402 } 2403 2404 core_initcall(init_elf_binfmt); 2405 module_exit(exit_elf_binfmt); 2406 MODULE_LICENSE("GPL"); 2407