xref: /openbmc/linux/fs/coredump.c (revision 9a938eba)
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
#include <linux/sysctl.h>
#include <linux/elf.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static bool dump_vma_snapshot(struct coredump_params *cprm);
static void free_vma_snapshot(struct coredump_params *cprm);

static int core_uses_pid;
static unsigned int core_pipe_limit;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

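/*
 * The expanded corefile name is accumulated in a struct core_name.
 * Illustrative example (not from this file): with the default pattern
 * "core" and core_uses_pid set, a crash of TGID 1234 is dumped to
 * "core.1234"; a pattern of "core.%e.%p" would yield "core.myapp.1234".
 */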
struct core_name {
	char *corename;
	int used, size;
};

static int expand_corename(struct core_name *cn, int size)
{
	char *corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	cn->size = ksize(corename);
	cn->corename = corename;
	return 0;
}

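/*
 * Append formatted text to cn->corename, growing the buffer on demand:
 * if vsnprintf() reports that the expansion did not fit, enlarge the
 * buffer by exactly the missing amount and retry.
 */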
static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't turn into
		 * a "." or ".." component of the resulting corefile path.
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
				(cn->used - cur == 2 && cn->corename[cur] == '.'
				&& cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}

static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
	struct file *exe_file;
	char *pathbuf, *path, *ptr;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	if (name_only) {
		ptr = strrchr(path, '/');
		if (ptr)
			path = ptr + 1;
	}
	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/*
 * format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
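/*
 * Illustrative pipe-mode pattern (an assumption, not taken from this
 * file): core_pattern = "|/usr/lib/systemd/systemd-coredump %P %u %g %s %t %c %h"
 * makes the kernel spawn that helper and feed the dump to its stdin;
 * each expanded specifier becomes one entry of the helper's argv.
 */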
static int format_corename(struct core_name *cn, struct coredump_params *cprm,
			   size_t **argv, int *argc)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	bool was_space = false;
	int pid_in_pattern = 0;
	int err = 0;

	cn->used = 0;
	cn->corename = NULL;
	if (expand_corename(cn, core_name_size))
		return -ENOMEM;
	cn->corename[0] = '\0';

	if (ispipe) {
		int argvs = sizeof(core_pattern) / 2;
		(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
		if (!(*argv))
			return -ENOMEM;
		(*argv)[(*argc)++] = 0;
		++pat_ptr;
		if (!(*pat_ptr))
			return -ENOMEM;
	}

	/*
	 * Repeat as long as we have more pattern to process and more
	 * output space.
	 */
	while (*pat_ptr) {
		/*
		 * Split on spaces before doing template expansion so that
		 * %e and %E don't get split if they have spaces in them
		 */
		if (ispipe) {
			if (isspace(*pat_ptr)) {
				if (cn->used != 0)
					was_space = true;
				pat_ptr++;
				continue;
			} else if (was_space) {
				was_space = false;
				err = cn_printf(cn, "%c", '\0');
				if (err)
					return err;
				(*argv)[(*argc)++] = cn->used;
			}
		}
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
					      task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
					      task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
					      task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable, could be changed by prctl PR_SET_NAME etc */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			/* file name of executable */
			case 'f':
				err = cn_print_exe_file(cn, true);
				break;
			case 'E':
				err = cn_print_exe_file(cn, false);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

out:
	/*
	 * Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands.
	 */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
	return ispipe;
}

static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	/* Allow SIGKILL, see prepare_signal() */
	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	for_each_thread(start, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

static int zap_threads(struct task_struct *tsk,
			struct core_state *core_state, int exit_code)
{
	struct signal_struct *signal = tsk->signal;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
		signal->core_state = core_state;
		nr = zap_process(tsk, exit_code);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
		tsk->flags |= PF_DUMPCORE;
		atomic_set(&core_state->nr_threads, nr);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	return nr;
}

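/*
 * Quiesce the thread group: zap_threads() marks the group as exiting and
 * kicks every other thread; each victim drops into coredump_task_exit(),
 * decrements core_state->nr_threads and parks, and the last one completes
 * core_state->startup, after which the dumper owns a stable mm.
 */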
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	core_waiters = zap_threads(tsk, core_state, exit_code);
	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion_state(&core_state->startup,
					  TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, TASK_ANY);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	next = current->signal->core_state->dumper.next;
	current->signal->core_state = NULL;
	spin_unlock_irq(&current->sighand->siglock);

	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see coredump_task_exit(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return fatal_signal_pending(current) || freezing(current);
}

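/*
 * Pose as a reader on the pipe and give up our writer reference so the
 * usermode helper sees EOF once it has drained the dump, then wait until
 * the helper has closed its read end, i.e. until our reader reference is
 * the only one left.
 */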
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->rd_wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * Helper function to customize the process used to collect the core in
 * userspace. Specifically, it sets up a pipe and installs it as fd 0
 * (stdin) for the process. Returns 0 on success, or a negative error
 * code on failure.
 * Note that it also sets the core limit to 1. This is a special value
 * that we use to trap recursive core dumps.
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}

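/*
 * do_coredump - write a core dump for the current, fatally-signalled task
 *
 * Quiesces the thread group, expands core_pattern into either a target
 * file or a pipe helper, and hands off to the binfmt's ->core_dump()
 * method. There is no return value; on any failure the dump is simply
 * skipped and the task proceeds to die.
 */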
void do_coredump(const kernel_siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int ispipe;
	size_t *argv = NULL;
	int argc = 0;
	/* require nonrelative corefile path and be extra careful */
	bool need_suid_safe = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
		.vma_meta = NULL,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_suid_safe = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm, &argv, &argc);

	if (ispipe) {
		int argi;
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/*
			 * See umh_pipe_setup(), which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use a
			 * cprm.limit of 1 here as a special value: it is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIMIT_CORE to a value other than 1, but it runs as
			 * root, and can do lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the thread group leader.  That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
					    GFP_KERNEL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}
		for (argi = 0; argi < argc; argi++)
			helper_argv[argi] = cn.corename + argv[argi];
		helper_argv[argi] = NULL;

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		kfree(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to |%s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct user_namespace *mnt_userns;
		struct inode *inode;
		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
				 O_LARGEFILE | O_EXCL;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_suid_safe && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/*
		 * Unlink the file if it exists unless this is a SUID
		 * binary - in that case, we're running around with root
		 * privs and don't want to unlink another user's coredump.
		 */
		if (!need_suid_safe) {
			/*
			 * If it doesn't exist, that's fine. If there's some
			 * other problem, we'll catch it at the filp_open().
			 */
			do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
		}

		/*
		 * There is a race between unlinking and creating the
		 * file, but if that causes an EEXIST here, that's
		 * fine - another process raced with us while creating
		 * the corefile, and the other process won. To userspace,
		 * what matters is that at least one of the two processes
		 * writes its coredump successfully, not which one.
		 */
		if (need_suid_safe) {
			/*
			 * Using user namespaces, normal user tasks can change
			 * their current->fs->root to point to arbitrary
			 * directories. Since the intention of the "only dump
			 * with a fully qualified path" rule is to control where
			 * coredumps may be placed using root privileges,
			 * current->fs->root must not be used. Instead, use the
			 * root directory of init_task.
			 */
			struct path root;

			task_lock(&init_task);
			get_fs_root(init_task.fs, &root);
			task_unlock(&init_task);
			cprm.file = file_open_root(&root, cn.corename,
						   open_flags, 0600);
			path_put(&root);
		} else {
			cprm.file = filp_open(cn.corename, open_flags, 0600);
		}
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually I see no reason not to allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't dump core if the filesystem changed owner or mode
		 * of the file during file creation. This is an issue when
		 * a process dumps core while its cwd is e.g. on a vfat
		 * filesystem.
		 */
		mnt_userns = file_mnt_user_ns(cprm.file);
		if (!uid_eq(i_uid_into_mnt(mnt_userns, inode),
			    current_fsuid())) {
			pr_info_ratelimited("Core dump to %s aborted: cannot preserve file owner\n",
					    cn.corename);
			goto close_fail;
		}
		if ((inode->i_mode & 0677) != 0600) {
			pr_info_ratelimited("Core dump to %s aborted: cannot preserve file permissions\n",
					    cn.corename);
			goto close_fail;
		}
		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
			goto close_fail;
		if (do_truncate(mnt_userns, cprm.file->f_path.dentry,
				0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	/* The cell spufs coredump code reads the file descriptor tables */
	retval = unshare_files();
	if (retval)
		goto close_fail;
	if (!dump_interrupted()) {
		/*
		 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
		 * have this set to NULL.
		 */
		if (!cprm.file) {
			pr_info("Core dump to |%s disabled\n", cn.corename);
			goto close_fail;
		}
		if (!dump_vma_snapshot(&cprm))
			goto close_fail;

		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		/*
		 * Ensure that the file size is big enough to contain the
		 * current file position. This prevents gdb from complaining
		 * about a truncated file if the last "write" to the file was
		 * dump_skip.
		 */
		if (cprm.to_skip) {
			cprm.to_skip--;
			dump_emit(&cprm, "", 1);
		}
		file_end_write(cprm.file);
		free_vma_snapshot(&cprm);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(argv);
	kfree(cn.corename);
	coredump_finish(core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}

/*
 * Core dumping helper functions.  These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;

	if (cprm->written + nr > cprm->limit)
		return 0;

	if (dump_interrupted())
		return 0;
	n = __kernel_write(file, addr, nr, &pos);
	if (n != nr)
		return 0;
	file->f_pos = pos;
	cprm->written += n;
	cprm->pos += n;

	return 1;
}

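/*
 * Skip a stretch of the dump. On a seekable target this is a cheap
 * lseek that leaves a hole in the core file; on a pipe the gap must be
 * materialized by writing zero-filled pages.
 */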
static int __dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;

	if (file->f_mode & FMODE_LSEEK) {
		if (dump_interrupted() ||
		    vfs_llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->pos += nr;
		return 1;
	} else {
		while (nr > PAGE_SIZE) {
			if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
				return 0;
			nr -= PAGE_SIZE;
		}
		return __dump_emit(cprm, zeroes, nr);
	}
}

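/*
 * Write out exactly one page. Feeding the page through a bio_vec and
 * __kernel_write_iter() avoids having to map the page into the kernel
 * address space just to obtain a buffer pointer for __kernel_write().
 */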
static int dump_emit_page(struct coredump_params *cprm, struct page *page)
{
	struct bio_vec bvec = {
		.bv_page	= page,
		.bv_offset	= 0,
		.bv_len		= PAGE_SIZE,
	};
	struct iov_iter iter;
	struct file *file = cprm->file;
	loff_t pos;
	ssize_t n;

	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	if (cprm->written + PAGE_SIZE > cprm->limit)
		return 0;
	if (dump_interrupted())
		return 0;
	pos = file->f_pos;
	iov_iter_bvec(&iter, WRITE, &bvec, 1, PAGE_SIZE);
	n = __kernel_write_iter(cprm->file, &iter, &pos);
	if (n != PAGE_SIZE)
		return 0;
	file->f_pos = pos;
	cprm->written += PAGE_SIZE;
	cprm->pos += PAGE_SIZE;

	return 1;
}

int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	return __dump_emit(cprm, addr, nr);
}
EXPORT_SYMBOL(dump_emit);

void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
{
	cprm->to_skip = pos - cprm->pos;
}
EXPORT_SYMBOL(dump_skip_to);

void dump_skip(struct coredump_params *cprm, size_t nr)
{
	cprm->to_skip += nr;
}
EXPORT_SYMBOL(dump_skip);

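/*
 * Typical use by a binfmt ->core_dump() handler (an illustrative sketch,
 * not code from this file; "hdr" and "data_offset" are hypothetical):
 *
 *	if (!dump_emit(cprm, &hdr, sizeof(hdr)))
 *		return 0;
 *	dump_skip_to(cprm, data_offset);
 *
 * Skips accumulate in cprm->to_skip and are only materialized by the
 * next emit, so sparse regions cost a single lseek on seekable files.
 */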
#ifdef CONFIG_ELF_CORE
int dump_user_range(struct coredump_params *cprm, unsigned long start,
		    unsigned long len)
{
	unsigned long addr;

	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page;

		/*
		 * To avoid having to allocate page tables for virtual address
		 * ranges that have never been used yet, and also to make it
		 * easy to generate sparse core files, use a helper that returns
		 * NULL when encountering an empty page table entry that would
		 * otherwise have been filled with the zero page.
		 */
		page = get_dump_page(addr);
		if (page) {
			int stop = !dump_emit_page(cprm, page);

			put_page(page);
			if (stop)
				return 0;
		} else {
			dump_skip(cprm, PAGE_SIZE);
		}
	}
	return 1;
}
#endif

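/*
 * Round the dump position up to @align, which must be a power of two.
 * The padding is accumulated in cprm->to_skip and materialized lazily
 * by the next emit.
 */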
int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = (cprm->pos + cprm->to_skip) & (align - 1);

	if (align & (align - 1))
		return 0;
	if (mod)
		cprm->to_skip += align - mod;
	return 1;
}
EXPORT_SYMBOL(dump_align);

#ifdef CONFIG_SYSCTL

void validate_coredump_safety(void)
{
	if (suid_dumpable == SUID_DUMP_ROOT &&
	    core_pattern[0] != '/' && core_pattern[0] != '|') {
		pr_warn(
"Unsafe core_pattern used with fs.suid_dumpable=2.\n"
"Pipe handler or fully qualified core dump path required.\n"
"Set kernel.core_pattern before fs.suid_dumpable.\n"
		);
	}
}

static int proc_dostring_coredump(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	int error = proc_dostring(table, write, buffer, lenp, ppos);

	if (!error)
		validate_coredump_safety();
	return error;
}

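/*
 * These knobs appear under /proc/sys/kernel/ as core_uses_pid,
 * core_pattern and core_pipe_limit; writes to core_pattern additionally
 * re-check suid-dump safety via validate_coredump_safety().
 */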
static struct ctl_table coredump_sysctls[] = {
	{
		.procname	= "core_uses_pid",
		.data		= &core_uses_pid,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "core_pattern",
		.data		= core_pattern,
		.maxlen		= CORENAME_MAX_SIZE,
		.mode		= 0644,
		.proc_handler	= proc_dostring_coredump,
	},
	{
		.procname	= "core_pipe_limit",
		.data		= &core_pipe_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __init init_fs_coredump_sysctls(void)
{
	register_sysctl_init("kernel", coredump_sysctls);
	return 0;
}
fs_initcall(init_fs_coredump_sysctls);
#endif /* CONFIG_SYSCTL */

/*
 * The purpose of always_dump_vma() is to make sure that special kernel
 * mappings that are useful for post-mortem analysis are included in every
 * core dump. That way, the core dump is fully interpretable later without
 * having to match up the same kernel and hardware config to see what the
 * PC values meant. These special mappings include the vDSO, vsyscall, and
 * other architecture-specific mappings.
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1

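/*
 * The MMF_DUMP_* bits tested below form the per-mm core dump filter,
 * configurable from userspace through /proc/<pid>/coredump_filter.
 */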
/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this is the beginning of an executable file mapping,
	 * dump the first page to aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
			return PAGE_SIZE;

		/*
		 * ELF libraries aren't always executable.
		 * We'll want to check whether the mapping starts with the ELF
		 * magic, but not now - we're holding the mmap lock,
		 * so copy_from_user() doesn't work here.
		 * Use a placeholder instead, and fix it up later in
		 * dump_vma_snapshot().
		 */
		return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}

/*
 * Helper function for iterating across a vma list.  It ensures that the
 * caller will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *coredump_next_vma(struct ma_state *mas,
				       struct vm_area_struct *vma,
				       struct vm_area_struct *gate_vma)
{
	if (gate_vma && (vma == gate_vma))
		return NULL;

	vma = mas_next(mas, ULONG_MAX);
	if (vma)
		return vma;
	return gate_vma;
}

static void free_vma_snapshot(struct coredump_params *cprm)
{
	if (cprm->vma_meta) {
		int i;

		for (i = 0; i < cprm->vma_count; i++) {
			struct file *file = cprm->vma_meta[i].file;

			if (file)
				fput(file);
		}
		kvfree(cprm->vma_meta);
		cprm->vma_meta = NULL;
	}
}

/*
 * Under the mmap_lock, take a snapshot of relevant information about the
 * task's VMAs.
 */
static bool dump_vma_snapshot(struct coredump_params *cprm)
{
	struct vm_area_struct *gate_vma, *vma = NULL;
	struct mm_struct *mm = current->mm;
	MA_STATE(mas, &mm->mm_mt, 0, 0);
	int i = 0;

	/*
	 * Once the stack expansion code is fixed to not change VMA bounds
	 * under mmap_lock in read mode, this can be changed to take the
	 * mmap_lock in read mode.
	 */
	if (mmap_write_lock_killable(mm))
		return false;

	cprm->vma_data_size = 0;
	gate_vma = get_gate_vma(mm);
	cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);

	cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL);
	if (!cprm->vma_meta) {
		mmap_write_unlock(mm);
		return false;
	}

	while ((vma = coredump_next_vma(&mas, vma, gate_vma)) != NULL) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		m->start = vma->vm_start;
		m->end = vma->vm_end;
		m->flags = vma->vm_flags;
		m->dump_size = vma_dump_size(vma, cprm->mm_flags);
		m->pgoff = vma->vm_pgoff;
		m->file = vma->vm_file;
		if (m->file)
			get_file(m->file);
		i++;
	}

	mmap_write_unlock(mm);

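	/*
	 * With the mmap_lock dropped, copy_from_user() works again, so the
	 * DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER entries can now be resolved by
	 * checking whether each candidate mapping starts with the ELF magic.
	 */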
	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
			char elfmag[SELFMAG];

			if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
					memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
				m->dump_size = 0;
			} else {
				m->dump_size = PAGE_SIZE;
			}
		}

		cprm->vma_data_size += m->dump_size;
	}

	return true;
}