// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
#include <linux/sysctl.h>
#include <linux/elf.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static bool dump_vma_snapshot(struct coredump_params *cprm);
static void free_vma_snapshot(struct coredump_params *cprm);

static int core_uses_pid;
static unsigned int core_pipe_limit;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

struct core_name {
	char *corename;
	int used, size;
};

static int expand_corename(struct core_name *cn, int size)
{
	char *corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	cn->size = ksize(corename);
	cn->corename = corename;
	return 0;
}
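
/*
 * Editor's note (illustrative, not from the original source): cn->size is
 * taken from ksize(), so the usable buffer is often larger than what was
 * asked for. E.g. a krealloc() of 100 bytes typically comes from the
 * kmalloc-128 slab, leaving cn->size == 128; cn_vprintf() can then use
 * the slack before another expansion is needed.
 */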

static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't cause the
		 * resulting corefile path to consist of a ".." or ".".
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
				(cn->used - cur == 2 && cn->corename[cur] == '.'
				&& cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}
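
/*
 * Illustrative examples of the escaping above (not from the original
 * source):
 *   ".."   -> "!."   (first byte replaced, so the path can't climb)
 *   "."    -> "!"
 *   ""     -> "!"    (empty components are padded out)
 *   "a/b"  -> "a!b"  (slashes can't introduce extra path levels)
 */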

static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
	struct file *exe_file;
	char *pathbuf, *path, *ptr;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	if (name_only) {
		ptr = strrchr(path, '/');
		if (ptr)
			path = ptr + 1;
	}
	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}
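
/*
 * Example (illustrative): for an executable at /usr/bin/crasher, %f
 * (name_only == true) yields "crasher", while %E passes the whole path
 * through cn_esc_printf() and yields "!usr!bin!crasher", since '/' is
 * escaped to '!'.
 */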

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm,
			   size_t **argv, int *argc)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	bool was_space = false;
	int pid_in_pattern = 0;
	int err = 0;

	cn->used = 0;
	cn->corename = NULL;
	if (expand_corename(cn, core_name_size))
		return -ENOMEM;
	cn->corename[0] = '\0';

	if (ispipe) {
		int argvs = sizeof(core_pattern) / 2;
		(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
		if (!(*argv))
			return -ENOMEM;
		(*argv)[(*argc)++] = 0;
		++pat_ptr;
		if (!(*pat_ptr))
			return -ENOMEM;
	}

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		/*
		 * Split on spaces before doing template expansion so that
		 * %e and %E don't get split if they have spaces in them
		 */
		if (ispipe) {
			if (isspace(*pat_ptr)) {
				if (cn->used != 0)
					was_space = true;
				pat_ptr++;
				continue;
			} else if (was_space) {
				was_space = false;
				err = cn_printf(cn, "%c", '\0');
				if (err)
					return err;
				(*argv)[(*argc)++] = cn->used;
			}
		}
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
					      task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
					      task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
					      task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable, could be changed by prctl PR_SET_NAME etc */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			/* file name of executable */
			case 'f':
				err = cn_print_exe_file(cn, true);
				break;
			case 'E':
				err = cn_print_exe_file(cn, false);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			/* CPU the task ran on */
			case 'C':
				err = cn_printf(cn, "%d", cprm->cpu);
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

out:
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
	return ispipe;
}
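
/*
 * Illustrative expansions (assumed values, not from the original source):
 *
 *   core_pattern = "core.%e.%p", comm "crasher", tgid 1234
 *     -> corename "core.crasher.1234", return value 0 (not a pipe)
 *
 *   core_pattern = "|/usr/bin/handler %p %s", SIGSEGV (11)
 *     -> corename "/usr/bin/handler\0" "1234\0" "11", with *argv holding
 *        the offset of each NUL-separated word, return value 1 (ispipe)
 */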

static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	/* Allow SIGKILL, see prepare_signal() */
	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	for_each_thread(start, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

static int zap_threads(struct task_struct *tsk,
			struct core_state *core_state, int exit_code)
{
	struct signal_struct *signal = tsk->signal;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
		signal->core_state = core_state;
		nr = zap_process(tsk, exit_code);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
		tsk->flags |= PF_DUMPCORE;
		atomic_set(&core_state->nr_threads, nr);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	core_waiters = zap_threads(tsk, core_state, exit_code);
	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion_state(&core_state->startup,
					  TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, TASK_ANY);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}
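
/*
 * Editor's note: the value returned above is the number of threads that
 * still have to pass through coredump_task_exit() (>= 0), or -EAGAIN from
 * zap_threads() if a group exit or exec already won the race.
 */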

static void coredump_finish(bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	next = current->signal->core_state->dumper.next;
	current->signal->core_state = NULL;
	spin_unlock_irq(&current->sighand->siglock);

	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see coredump_task_exit(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return fatal_signal_pending(current) || freezing(current);
}

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->rd_wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace.  Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process.  Returns 0 on success, or a
 * negative error code on failure.
 * Note that it also sets the core limit to 1.  This
 * is a special value that we use to trap recursive
 * core dumps.
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}
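
/*
 * Illustrative usage (assumed, not from the original source): with
 * core_pattern = "|/usr/bin/handler %p", the helper spawned via
 * call_usermodehelper_setup() runs with the dump on fd 0, so a trivial
 * handler can be as simple as a script doing "cat > /tmp/core.$1".
 */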

void do_coredump(const kernel_siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int ispipe;
	size_t *argv = NULL;
	int argc = 0;
	/* require nonrelative corefile path and be extra careful */
	bool need_suid_safe = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
		.vma_meta = NULL,
		.cpu = raw_smp_processor_id(),
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_suid_safe = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm, &argv, &argc);

	if (ispipe) {
		int argi;
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value; this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIMIT_CORE to some value other than 1, but that
			 * binary runs as root and can do lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the thread group leader.  That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
					    GFP_KERNEL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}
		for (argi = 0; argi < argc; argi++)
			helper_argv[argi] = cn.corename + argv[argi];
		helper_argv[argi] = NULL;

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		kfree(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to |%s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct user_namespace *mnt_userns;
		struct inode *inode;
		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
				 O_LARGEFILE | O_EXCL;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_suid_safe && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/*
		 * Unlink the file if it exists unless this is a SUID
		 * binary - in that case, we're running around with root
		 * privs and don't want to unlink another user's coredump.
		 */
		if (!need_suid_safe) {
			/*
			 * If it doesn't exist, that's fine. If there's some
			 * other problem, we'll catch it at the filp_open().
			 */
			do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
		}

		/*
		 * There is a race between unlinking and creating the
		 * file, but if that causes an EEXIST here, that's
		 * fine - another process raced with us while creating
		 * the corefile, and the other process won. To userspace,
		 * what matters is that at least one of the two processes
		 * writes its coredump successfully, not which one.
		 */
		if (need_suid_safe) {
			/*
			 * Using user namespaces, normal user tasks can change
			 * their current->fs->root to point to arbitrary
			 * directories. Since the intention of the "only dump
			 * with a fully qualified path" rule is to control where
			 * coredumps may be placed using root privileges,
			 * current->fs->root must not be used. Instead, use the
			 * root directory of init_task.
			 */
			struct path root;

			task_lock(&init_task);
			get_fs_root(init_task.fs, &root);
			task_unlock(&init_task);
			cprm.file = file_open_root(&root, cn.corename,
						   open_flags, 0600);
			path_put(&root);
		} else {
			cprm.file = filp_open(cn.corename, open_flags, 0600);
		}
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually I see no reason to not allow this for named
		 * pipes etc., but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't dump core if the filesystem changed owner or mode
		 * of the file during file creation. This is an issue when
		 * a process dumps core while its cwd is e.g. on a vfat
		 * filesystem.
		 */
		mnt_userns = file_mnt_user_ns(cprm.file);
		if (!uid_eq(i_uid_into_mnt(mnt_userns, inode),
			    current_fsuid())) {
			pr_info_ratelimited("Core dump to %s aborted: cannot preserve file owner\n",
					    cn.corename);
			goto close_fail;
		}
		if ((inode->i_mode & 0677) != 0600) {
			pr_info_ratelimited("Core dump to %s aborted: cannot preserve file permissions\n",
					    cn.corename);
			goto close_fail;
		}
		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
			goto close_fail;
		if (do_truncate(mnt_userns, cprm.file->f_path.dentry,
				0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	/* The cell spufs coredump code reads the file descriptor tables */
	retval = unshare_files();
	if (retval)
		goto close_fail;
	if (!dump_interrupted()) {
		/*
		 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
		 * have this set to NULL.
		 */
		if (!cprm.file) {
			pr_info("Core dump to |%s disabled\n", cn.corename);
			goto close_fail;
		}
		if (!dump_vma_snapshot(&cprm))
			goto close_fail;

		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		/*
		 * Ensure that the file size is big enough to contain the
		 * current file position. This prevents gdb from complaining
		 * about a truncated file if the last "write" to the file was
		 * dump_skip.
		 */
		if (cprm.to_skip) {
			cprm.to_skip--;
			dump_emit(&cprm, "", 1);
		}
		file_end_write(cprm.file);
		free_vma_snapshot(&cprm);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(argv);
	kfree(cn.corename);
	coredump_finish(core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}
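
/*
 * Illustrative configuration (assumed, not from the original source):
 *
 *   # dump to a fixed directory, with pattern expansion:
 *   echo '/var/crash/core.%e.%p.%t' > /proc/sys/kernel/core_pattern
 *
 *   # or pipe to a collector (%P = pid in the initial namespace):
 *   echo '|/usr/lib/systemd/systemd-coredump %P %u %g %s %t %c %h' \
 *       > /proc/sys/kernel/core_pattern
 */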

/*
 * Core dumping helper functions.  These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;

	if (cprm->written + nr > cprm->limit)
		return 0;

	if (dump_interrupted())
		return 0;
	n = __kernel_write(file, addr, nr, &pos);
	if (n != nr)
		return 0;
	file->f_pos = pos;
	cprm->written += n;
	cprm->pos += n;

	return 1;
}

static int __dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;

	if (file->f_mode & FMODE_LSEEK) {
		if (dump_interrupted() ||
		    vfs_llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->pos += nr;
		return 1;
	} else {
		while (nr > PAGE_SIZE) {
			if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
				return 0;
			nr -= PAGE_SIZE;
		}
		return __dump_emit(cprm, zeroes, nr);
	}
}
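
/*
 * Editor's note: on a seekable file the skip becomes a hole, so untouched
 * address ranges cost no disk space; a pipe cannot seek, so the same range
 * has to be written out as PAGE_SIZE chunks of zeroes.
 */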

static int dump_emit_page(struct coredump_params *cprm, struct page *page)
{
	struct bio_vec bvec = {
		.bv_page	= page,
		.bv_offset	= 0,
		.bv_len		= PAGE_SIZE,
	};
	struct iov_iter iter;
	struct file *file = cprm->file;
	loff_t pos;
	ssize_t n;

	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	if (cprm->written + PAGE_SIZE > cprm->limit)
		return 0;
	if (dump_interrupted())
		return 0;
	pos = file->f_pos;
	iov_iter_bvec(&iter, WRITE, &bvec, 1, PAGE_SIZE);
	n = __kernel_write_iter(cprm->file, &iter, &pos);
	if (n != PAGE_SIZE)
		return 0;
	file->f_pos = pos;
	cprm->written += PAGE_SIZE;
	cprm->pos += PAGE_SIZE;

	return 1;
}

int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	return __dump_emit(cprm, addr, nr);
}
EXPORT_SYMBOL(dump_emit);

void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
{
	cprm->to_skip = pos - cprm->pos;
}
EXPORT_SYMBOL(dump_skip_to);

void dump_skip(struct coredump_params *cprm, size_t nr)
{
	cprm->to_skip += nr;
}
EXPORT_SYMBOL(dump_skip);
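
/*
 * Worked example (illustrative): after dump_skip_to(cprm, 0x2000) with
 * cprm->pos == 0x1800, to_skip becomes 0x800; the next dump_emit() first
 * materializes that gap (seek or zeroes) before writing its payload.
 */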

#ifdef CONFIG_ELF_CORE
int dump_user_range(struct coredump_params *cprm, unsigned long start,
		    unsigned long len)
{
	unsigned long addr;

	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page;

		/*
		 * To avoid having to allocate page tables for virtual address
		 * ranges that have never been used yet, and also to make it
		 * easy to generate sparse core files, use a helper that returns
		 * NULL when encountering an empty page table entry that would
		 * otherwise have been filled with the zero page.
		 */
		page = get_dump_page(addr);
		if (page) {
			int stop = !dump_emit_page(cprm, page);
			put_page(page);
			if (stop)
				return 0;
		} else {
			dump_skip(cprm, PAGE_SIZE);
		}
	}
	return 1;
}
#endif
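
/*
 * Editor's note: this is what makes core files sparse - pages that were
 * never faulted in have no page table entry, get_dump_page() returns NULL,
 * and the range is recorded as a skip instead of being written out.
 */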

int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = (cprm->pos + cprm->to_skip) & (align - 1);

	if (align & (align - 1))
		return 0;
	if (mod)
		cprm->to_skip += align - mod;
	return 1;
}
EXPORT_SYMBOL(dump_align);
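
/*
 * Worked example (illustrative): with cprm->pos + cprm->to_skip == 9 and
 * align == 8, mod == 1 and to_skip grows by 7, so the next emit lands on
 * offset 16. A non-power-of-two align is rejected (returns 0).
 */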

#ifdef CONFIG_SYSCTL

void validate_coredump_safety(void)
{
	if (suid_dumpable == SUID_DUMP_ROOT &&
	    core_pattern[0] != '/' && core_pattern[0] != '|') {
		pr_warn(
"Unsafe core_pattern used with fs.suid_dumpable=2.\n"
"Pipe handler or fully qualified core dump path required.\n"
"Set kernel.core_pattern before fs.suid_dumpable.\n"
		);
	}
}

static int proc_dostring_coredump(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	int error = proc_dostring(table, write, buffer, lenp, ppos);

	if (!error)
		validate_coredump_safety();
	return error;
}

static struct ctl_table coredump_sysctls[] = {
	{
		.procname	= "core_uses_pid",
		.data		= &core_uses_pid,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "core_pattern",
		.data		= core_pattern,
		.maxlen		= CORENAME_MAX_SIZE,
		.mode		= 0644,
		.proc_handler	= proc_dostring_coredump,
	},
	{
		.procname	= "core_pipe_limit",
		.data		= &core_pipe_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __init init_fs_coredump_sysctls(void)
{
	register_sysctl_init("kernel", coredump_sysctls);
	return 0;
}
fs_initcall(init_fs_coredump_sysctls);
#endif /* CONFIG_SYSCTL */

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * That way we ensure that the core dump is fully interpretable later without
 * having to match it against the same kernel and hardware config to work out
 * what the PC values meant. These special mappings include the vDSO, vsyscall,
 * and other architecture-specific mappings.
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1

/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this is the beginning of an executable file mapping,
	 * dump the first page to aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
			return PAGE_SIZE;

		/*
		 * ELF libraries aren't always executable.
		 * We'll want to check whether the mapping starts with the ELF
		 * magic, but not now - we're holding the mmap lock,
		 * so copy_from_user() doesn't work here.
		 * Use a placeholder instead, and fix it up later in
		 * dump_vma_snapshot().
		 */
		return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
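
/*
 * Illustrative tuning (assumed, not from the original source): the
 * FILTER() bits correspond to /proc/<pid>/coredump_filter, e.g.
 *
 *   echo 0x31 > /proc/self/coredump_filter
 *
 * would keep anonymous private memory, ELF headers and private hugetlb
 * pages in the dump while dropping the shared mappings.
 */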

/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *coredump_next_vma(struct ma_state *mas,
				       struct vm_area_struct *vma,
				       struct vm_area_struct *gate_vma)
{
	if (gate_vma && (vma == gate_vma))
		return NULL;

	vma = mas_next(mas, ULONG_MAX);
	if (vma)
		return vma;
	return gate_vma;
}

static void free_vma_snapshot(struct coredump_params *cprm)
{
	if (cprm->vma_meta) {
		int i;

		for (i = 0; i < cprm->vma_count; i++) {
			struct file *file = cprm->vma_meta[i].file;

			if (file)
				fput(file);
		}
		kvfree(cprm->vma_meta);
		cprm->vma_meta = NULL;
	}
}

/*
 * Under the mmap_lock, take a snapshot of relevant information about the
 * task's VMAs.
 */
static bool dump_vma_snapshot(struct coredump_params *cprm)
{
	struct vm_area_struct *gate_vma, *vma = NULL;
	struct mm_struct *mm = current->mm;
	MA_STATE(mas, &mm->mm_mt, 0, 0);
	int i = 0;

	/*
	 * Once the stack expansion code is fixed to not change VMA bounds
	 * under mmap_lock in read mode, this can be changed to take the
	 * mmap_lock in read mode.
	 */
	if (mmap_write_lock_killable(mm))
		return false;

	cprm->vma_data_size = 0;
	gate_vma = get_gate_vma(mm);
	cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);

	cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL);
	if (!cprm->vma_meta) {
		mmap_write_unlock(mm);
		return false;
	}

	while ((vma = coredump_next_vma(&mas, vma, gate_vma)) != NULL) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		m->start = vma->vm_start;
		m->end = vma->vm_end;
		m->flags = vma->vm_flags;
		m->dump_size = vma_dump_size(vma, cprm->mm_flags);
		m->pgoff = vma->vm_pgoff;
		m->file = vma->vm_file;
		if (m->file)
			get_file(m->file);
		i++;
	}

	mmap_write_unlock(mm);

	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
			char elfmag[SELFMAG];

			if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
					memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
				m->dump_size = 0;
			} else {
				m->dump_size = PAGE_SIZE;
			}
		}

		cprm->vma_data_size += m->dump_size;
	}

	return true;
}