// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
#include <linux/sysctl.h>
#include <linux/elf.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static bool dump_vma_snapshot(struct coredump_params *cprm);
static void free_vma_snapshot(struct coredump_params *cprm);

static int core_uses_pid;
static unsigned int core_pipe_limit;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

struct core_name {
	char *corename;
	int used, size;
};

static int expand_corename(struct core_name *cn, int size)
{
	char *corename;

	size = kmalloc_size_roundup(size);
	corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	cn->size = size;
	cn->corename = corename;
	return 0;
}

static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

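/*
 * Like cn_printf(), but escape the expanded text so it cannot change the
 * directory part of the core path: '/' becomes '!', and a component that
 * would end up as ".", ".." or an empty string is rewritten as well.
 */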
static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't cause the
		 * resulting corefile path to consist of a ".." or ".".
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
			(cn->used - cur == 2 && cn->corename[cur] == '.'
				&& cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}

static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
	struct file *exe_file;
	char *pathbuf, *path, *ptr;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	if (name_only) {
		ptr = strrchr(path, '/');
		if (ptr)
			path = ptr + 1;
	}
	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm,
			   size_t **argv, int *argc)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	bool was_space = false;
	int pid_in_pattern = 0;
	int err = 0;

	cn->used = 0;
	cn->corename = NULL;
	if (expand_corename(cn, core_name_size))
		return -ENOMEM;
	cn->corename[0] = '\0';

	if (ispipe) {
		int argvs = sizeof(core_pattern) / 2;
		(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
		if (!(*argv))
			return -ENOMEM;
		(*argv)[(*argc)++] = 0;
		++pat_ptr;
		if (!(*pat_ptr))
			return -ENOMEM;
	}

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		/*
		 * Split on spaces before doing template expansion so that
		 * %e and %E don't get split if they have spaces in them
		 */
		if (ispipe) {
			if (isspace(*pat_ptr)) {
				if (cn->used != 0)
					was_space = true;
				pat_ptr++;
				continue;
			} else if (was_space) {
				was_space = false;
				err = cn_printf(cn, "%c", '\0');
				if (err)
					return err;
				(*argv)[(*argc)++] = cn->used;
			}
		}
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
					      task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
					      task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
					      task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable, could be changed by prctl PR_SET_NAME etc */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			/* file name of executable */
			case 'f':
				err = cn_print_exe_file(cn, true);
				break;
			case 'E':
				err = cn_print_exe_file(cn, false);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

out:
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
	return ispipe;
}

static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	/* Allow SIGKILL, see prepare_signal() */
	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	for_each_thread(start, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

static int zap_threads(struct task_struct *tsk,
			struct core_state *core_state, int exit_code)
{
	struct signal_struct *signal = tsk->signal;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
		signal->core_state = core_state;
		nr = zap_process(tsk, exit_code);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
		tsk->flags |= PF_DUMPCORE;
		atomic_set(&core_state->nr_threads, nr);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	return nr;
}

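/*
 * Kick every other thread in the thread group into coredump_task_exit() and
 * wait (freezably) until all of them have checked in, so the dumper sees
 * stable thread state.  Returns the number of those threads (0 for a
 * single-threaded process), or -EAGAIN if the group is already exiting or
 * execing.
 */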
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	core_waiters = zap_threads(tsk, core_state, exit_code);
	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion_state(&core_state->startup,
					  TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, TASK_ANY);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	next = current->signal->core_state->dumper.next;
	current->signal->core_state = NULL;
	spin_unlock_irq(&current->sighand->siglock);

	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see coredump_task_exit(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return fatal_signal_pending(current) || freezing(current);
}

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->rd_wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace.  Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process.  Returns 0 on success, or a
 * negative errno on failure.
 * Note that it also sets the core limit to 1.  This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}

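/*
 * Write a core dump for the current task, either to a file matching
 * core_pattern or to a pipe handler.  Failures are not reported to the
 * caller; the dump is simply abandoned via the fail_* labels below.
 */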
void do_coredump(const kernel_siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int ispipe;
	size_t *argv = NULL;
	int argc = 0;
	/* require nonrelative corefile path and be extra careful */
	bool need_suid_safe = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
		.vma_meta = NULL,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_suid_safe = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm, &argv, &argc);

	if (ispipe) {
		int argi;
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value, this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIMIT_CORE = !1, but it runs as root, and can do
			 * lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
					    GFP_KERNEL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}
		for (argi = 0; argi < argc; argi++)
			helper_argv[argi] = cn.corename + argv[argi];
		helper_argv[argi] = NULL;

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		kfree(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to |%s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct user_namespace *mnt_userns;
		struct inode *inode;
		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
				 O_LARGEFILE | O_EXCL;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_suid_safe && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/*
		 * Unlink the file if it exists unless this is a SUID
		 * binary - in that case, we're running around with root
		 * privs and don't want to unlink another user's coredump.
		 */
		if (!need_suid_safe) {
			/*
			 * If it doesn't exist, that's fine. If there's some
			 * other problem, we'll catch it at the filp_open().
			 */
			do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
		}

		/*
		 * There is a race between unlinking and creating the
		 * file, but if that causes an EEXIST here, that's
		 * fine - another process raced with us while creating
		 * the corefile, and the other process won. To userspace,
		 * what matters is that at least one of the two processes
		 * writes its coredump successfully, not which one.
		 */
		if (need_suid_safe) {
			/*
			 * Using user namespaces, normal user tasks can change
			 * their current->fs->root to point to arbitrary
			 * directories. Since the intention of the "only dump
			 * with a fully qualified path" rule is to control where
			 * coredumps may be placed using root privileges,
			 * current->fs->root must not be used. Instead, use the
			 * root directory of init_task.
			 */
			struct path root;

			task_lock(&init_task);
			get_fs_root(init_task.fs, &root);
			task_unlock(&init_task);
			cprm.file = file_open_root(&root, cn.corename,
						   open_flags, 0600);
			path_put(&root);
		} else {
			cprm.file = filp_open(cn.corename, open_flags, 0600);
		}
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't dump core if the filesystem changed owner or mode
		 * of the file during file creation. This is an issue when
		 * a process dumps core while its cwd is e.g. on a vfat
		 * filesystem.
		 */
		mnt_userns = file_mnt_user_ns(cprm.file);
		if (!uid_eq(i_uid_into_mnt(mnt_userns, inode),
			    current_fsuid())) {
			pr_info_ratelimited("Core dump to %s aborted: cannot preserve file owner\n",
					    cn.corename);
			goto close_fail;
		}
		if ((inode->i_mode & 0677) != 0600) {
			pr_info_ratelimited("Core dump to %s aborted: cannot preserve file permissions\n",
					    cn.corename);
			goto close_fail;
		}
		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
			goto close_fail;
		if (do_truncate(mnt_userns, cprm.file->f_path.dentry,
				0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	/* The cell spufs coredump code reads the file descriptor tables */
	retval = unshare_files();
	if (retval)
		goto close_fail;
	if (!dump_interrupted()) {
		/*
		 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
		 * have this set to NULL.
		 */
		if (!cprm.file) {
			pr_info("Core dump to |%s disabled\n", cn.corename);
			goto close_fail;
		}
		if (!dump_vma_snapshot(&cprm))
			goto close_fail;

		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		/*
		 * Ensures that file size is big enough to contain the current
		 * file position. This prevents gdb from complaining about
		 * a truncated file if the last "write" to the file was
		 * dump_skip.
		 */
		if (cprm.to_skip) {
			cprm.to_skip--;
			dump_emit(&cprm, "", 1);
		}
		file_end_write(cprm.file);
		free_vma_snapshot(&cprm);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(argv);
	kfree(cn.corename);
	coredump_finish(core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}

/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
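/*
 * Output position is tracked in cprm->pos; skipped ranges accumulate lazily
 * in cprm->to_skip and are only materialised (by seeking on seekable files,
 * or by writing zeroes otherwise) when the next real data is emitted.
 */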
static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;

	if (cprm->written + nr > cprm->limit)
		return 0;

	if (dump_interrupted())
		return 0;
	n = __kernel_write(file, addr, nr, &pos);
	if (n != nr)
		return 0;
	file->f_pos = pos;
	cprm->written += n;
	cprm->pos += n;

	return 1;
}

static int __dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;

	if (file->f_mode & FMODE_LSEEK) {
		if (dump_interrupted() ||
		    vfs_llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->pos += nr;
		return 1;
	} else {
		while (nr > PAGE_SIZE) {
			if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
				return 0;
			nr -= PAGE_SIZE;
		}
		return __dump_emit(cprm, zeroes, nr);
	}
}

static int dump_emit_page(struct coredump_params *cprm, struct page *page)
{
	struct bio_vec bvec = {
		.bv_page	= page,
		.bv_offset	= 0,
		.bv_len		= PAGE_SIZE,
	};
	struct iov_iter iter;
	struct file *file = cprm->file;
	loff_t pos;
	ssize_t n;

	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	if (cprm->written + PAGE_SIZE > cprm->limit)
		return 0;
	if (dump_interrupted())
		return 0;
	pos = file->f_pos;
	iov_iter_bvec(&iter, WRITE, &bvec, 1, PAGE_SIZE);
	n = __kernel_write_iter(cprm->file, &iter, &pos);
	if (n != PAGE_SIZE)
		return 0;
	file->f_pos = pos;
	cprm->written += PAGE_SIZE;
	cprm->pos += PAGE_SIZE;

	return 1;
}

int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	return __dump_emit(cprm, addr, nr);
}
EXPORT_SYMBOL(dump_emit);

void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
{
	cprm->to_skip = pos - cprm->pos;
}
EXPORT_SYMBOL(dump_skip_to);

void dump_skip(struct coredump_params *cprm, size_t nr)
{
	cprm->to_skip += nr;
}
EXPORT_SYMBOL(dump_skip);

#ifdef CONFIG_ELF_CORE
int dump_user_range(struct coredump_params *cprm, unsigned long start,
		    unsigned long len)
{
	unsigned long addr;

	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page;

		/*
		 * To avoid having to allocate page tables for virtual address
		 * ranges that have never been used yet, and also to make it
		 * easy to generate sparse core files, use a helper that returns
		 * NULL when encountering an empty page table entry that would
		 * otherwise have been filled with the zero page.
		 */
		page = get_dump_page(addr);
		if (page) {
			int stop = !dump_emit_page(cprm, page);
			put_page(page);
			if (stop)
				return 0;
		} else {
			dump_skip(cprm, PAGE_SIZE);
		}
	}
	return 1;
}
#endif

int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = (cprm->pos + cprm->to_skip) & (align - 1);
	if (align & (align - 1))
		return 0;
	if (mod)
		cprm->to_skip += align - mod;
	return 1;
}
EXPORT_SYMBOL(dump_align);

#ifdef CONFIG_SYSCTL

void validate_coredump_safety(void)
{
	if (suid_dumpable == SUID_DUMP_ROOT &&
	    core_pattern[0] != '/' && core_pattern[0] != '|') {
		pr_warn(
"Unsafe core_pattern used with fs.suid_dumpable=2.\n"
"Pipe handler or fully qualified core dump path required.\n"
"Set kernel.core_pattern before fs.suid_dumpable.\n"
		);
	}
}

static int proc_dostring_coredump(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	int error = proc_dostring(table, write, buffer, lenp, ppos);

	if (!error)
		validate_coredump_safety();
	return error;
}

static struct ctl_table coredump_sysctls[] = {
	{
		.procname	= "core_uses_pid",
		.data		= &core_uses_pid,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "core_pattern",
		.data		= core_pattern,
		.maxlen		= CORENAME_MAX_SIZE,
		.mode		= 0644,
		.proc_handler	= proc_dostring_coredump,
	},
	{
		.procname	= "core_pipe_limit",
		.data		= &core_pipe_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __init init_fs_coredump_sysctls(void)
{
	register_sysctl_init("kernel", coredump_sysctls);
	return 0;
}
fs_initcall(init_fs_coredump_sysctls);
#endif /* CONFIG_SYSCTL */

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include - vDSO, vsyscall, and other
 * architecture specific mappings
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1

/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
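/*
 * The result is either 0 (skip the vma entirely), PAGE_SIZE (dump just the
 * first page), the full vma length, or DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER,
 * which dump_vma_snapshot() later resolves to 0 or PAGE_SIZE once the ELF
 * magic can be checked outside the mmap_lock.
 */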
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to. */
	if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this is the beginning of an executable file mapping,
	 * dump the first page to aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
			return PAGE_SIZE;

		/*
		 * ELF libraries aren't always executable.
		 * We'll want to check whether the mapping starts with the ELF
		 * magic, but not now - we're holding the mmap lock,
		 * so copy_from_user() doesn't work here.
		 * Use a placeholder instead, and fix it up later in
		 * dump_vma_snapshot().
		 */
		return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
	}

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}

/*
 * Helper function for iterating across a vma list. It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *coredump_next_vma(struct ma_state *mas,
				       struct vm_area_struct *vma,
				       struct vm_area_struct *gate_vma)
{
	if (gate_vma && (vma == gate_vma))
		return NULL;

	vma = mas_next(mas, ULONG_MAX);
	if (vma)
		return vma;
	return gate_vma;
}

static void free_vma_snapshot(struct coredump_params *cprm)
{
	if (cprm->vma_meta) {
		int i;

		for (i = 0; i < cprm->vma_count; i++) {
			struct file *file = cprm->vma_meta[i].file;

			if (file)
				fput(file);
		}
		kvfree(cprm->vma_meta);
		cprm->vma_meta = NULL;
	}
}

/*
 * Under the mmap_lock, take a snapshot of relevant information about the task's
 * VMAs.
 */
static bool dump_vma_snapshot(struct coredump_params *cprm)
{
	struct vm_area_struct *gate_vma, *vma = NULL;
	struct mm_struct *mm = current->mm;
	MA_STATE(mas, &mm->mm_mt, 0, 0);
	int i = 0;

	/*
	 * Once the stack expansion code is fixed to not change VMA bounds
	 * under mmap_lock in read mode, this can be changed to take the
	 * mmap_lock in read mode.
	 */
	if (mmap_write_lock_killable(mm))
		return false;

	cprm->vma_data_size = 0;
	gate_vma = get_gate_vma(mm);
	cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);

	cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL);
	if (!cprm->vma_meta) {
		mmap_write_unlock(mm);
		return false;
	}

	while ((vma = coredump_next_vma(&mas, vma, gate_vma)) != NULL) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		m->start = vma->vm_start;
		m->end = vma->vm_end;
		m->flags = vma->vm_flags;
		m->dump_size = vma_dump_size(vma, cprm->mm_flags);
		m->pgoff = vma->vm_pgoff;
		m->file = vma->vm_file;
		if (m->file)
			get_file(m->file);
		i++;
	}

	mmap_write_unlock(mm);

	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
			char elfmag[SELFMAG];

			if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
					memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
				m->dump_size = 0;
			} else {
				m->dump_size = PAGE_SIZE;
			}
		}

		cprm->vma_data_size += m->dump_size;
	}

	return true;
}