xref: /openbmc/linux/fs/exec.c (revision 174cd4b1)
1 /*
2  *  linux/fs/exec.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 /*
8  * #!-checking implemented by tytso.
9  */
10 /*
11  * Demand-loading implemented 01.12.91 - no need to read anything but
12  * the header into memory. The inode of the executable is put into
13  * "current->executable", and page faults do the actual loading. Clean.
14  *
15  * Once more I can proudly say that linux stood up to being changed: it
16  * was less than 2 hours work to get demand-loading completely implemented.
17  *
18  * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
19  * current->executable is only used by the procfs.  This allows a dispatch
20  * table to check for several different types  of binary formats.  We keep
21  * trying until we recognize the file or we run out of supported binary
22  * formats.
23  */
24 
25 #include <linux/slab.h>
26 #include <linux/file.h>
27 #include <linux/fdtable.h>
28 #include <linux/mm.h>
29 #include <linux/vmacache.h>
30 #include <linux/stat.h>
31 #include <linux/fcntl.h>
32 #include <linux/swap.h>
33 #include <linux/string.h>
34 #include <linux/init.h>
35 #include <linux/sched/mm.h>
36 #include <linux/sched/coredump.h>
37 #include <linux/sched/signal.h>
38 #include <linux/sched/numa_balancing.h>
39 #include <linux/pagemap.h>
40 #include <linux/perf_event.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/key.h>
44 #include <linux/personality.h>
45 #include <linux/binfmts.h>
46 #include <linux/utsname.h>
47 #include <linux/pid_namespace.h>
48 #include <linux/module.h>
49 #include <linux/namei.h>
50 #include <linux/mount.h>
51 #include <linux/security.h>
52 #include <linux/syscalls.h>
53 #include <linux/tsacct_kern.h>
54 #include <linux/cn_proc.h>
55 #include <linux/audit.h>
56 #include <linux/tracehook.h>
57 #include <linux/kmod.h>
58 #include <linux/fsnotify.h>
59 #include <linux/fs_struct.h>
60 #include <linux/pipe_fs_i.h>
61 #include <linux/oom.h>
62 #include <linux/compat.h>
63 #include <linux/vmalloc.h>
64 
65 #include <linux/uaccess.h>
66 #include <asm/mmu_context.h>
67 #include <asm/tlb.h>
68 
69 #include <trace/events/task.h>
70 #include "internal.h"
71 
72 #include <trace/events/sched.h>
73 
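/*
 * Backs the fs.suid_dumpable sysctl, which selects how processes that have
 * changed credentials (or are otherwise not dumpable) may be core-dumped:
 * 0 = SUID_DUMP_DISABLE, 1 = SUID_DUMP_USER, 2 = SUID_DUMP_ROOT.  See
 * setup_new_exec() and set_dumpable() below.
 */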
74 int suid_dumpable = 0;
75 
76 static LIST_HEAD(formats);
77 static DEFINE_RWLOCK(binfmt_lock);
78 
79 void __register_binfmt(struct linux_binfmt * fmt, int insert)
80 {
81 	BUG_ON(!fmt);
82 	if (WARN_ON(!fmt->load_binary))
83 		return;
84 	write_lock(&binfmt_lock);
85 	insert ? list_add(&fmt->lh, &formats) :
86 		 list_add_tail(&fmt->lh, &formats);
87 	write_unlock(&binfmt_lock);
88 }
89 
90 EXPORT_SYMBOL(__register_binfmt);
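
/*
 * A registration sketch (illustrative, not code from this file): binary
 * format handlers normally register themselves from their init code via the
 * register_binfmt()/insert_binfmt() wrappers in <linux/binfmts.h>, e.g.:
 *
 *	static struct linux_binfmt example_format = {
 *		.module		= THIS_MODULE,
 *		.load_binary	= example_load_binary,
 *	};
 *
 *	static int __init init_example_binfmt(void)
 *	{
 *		register_binfmt(&example_format);
 *		return 0;
 *	}
 *
 * "example_*" is a made-up name; see fs/binfmt_elf.c for a real registration.
 */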
91 
92 void unregister_binfmt(struct linux_binfmt * fmt)
93 {
94 	write_lock(&binfmt_lock);
95 	list_del(&fmt->lh);
96 	write_unlock(&binfmt_lock);
97 }
98 
99 EXPORT_SYMBOL(unregister_binfmt);
100 
101 static inline void put_binfmt(struct linux_binfmt * fmt)
102 {
103 	module_put(fmt->module);
104 }
105 
106 bool path_noexec(const struct path *path)
107 {
108 	return (path->mnt->mnt_flags & MNT_NOEXEC) ||
109 	       (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
110 }
111 
112 #ifdef CONFIG_USELIB
113 /*
114  * Note that a shared library must be both readable and executable for
115  * security reasons.
116  *
117  * Also note that we take the load address from the file itself.
118  */
119 SYSCALL_DEFINE1(uselib, const char __user *, library)
120 {
121 	struct linux_binfmt *fmt;
122 	struct file *file;
123 	struct filename *tmp = getname(library);
124 	int error = PTR_ERR(tmp);
125 	static const struct open_flags uselib_flags = {
126 		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
127 		.acc_mode = MAY_READ | MAY_EXEC,
128 		.intent = LOOKUP_OPEN,
129 		.lookup_flags = LOOKUP_FOLLOW,
130 	};
131 
132 	if (IS_ERR(tmp))
133 		goto out;
134 
135 	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
136 	putname(tmp);
137 	error = PTR_ERR(file);
138 	if (IS_ERR(file))
139 		goto out;
140 
141 	error = -EINVAL;
142 	if (!S_ISREG(file_inode(file)->i_mode))
143 		goto exit;
144 
145 	error = -EACCES;
146 	if (path_noexec(&file->f_path))
147 		goto exit;
148 
149 	fsnotify_open(file);
150 
151 	error = -ENOEXEC;
152 
153 	read_lock(&binfmt_lock);
154 	list_for_each_entry(fmt, &formats, lh) {
155 		if (!fmt->load_shlib)
156 			continue;
157 		if (!try_module_get(fmt->module))
158 			continue;
159 		read_unlock(&binfmt_lock);
160 		error = fmt->load_shlib(file);
161 		read_lock(&binfmt_lock);
162 		put_binfmt(fmt);
163 		if (error != -ENOEXEC)
164 			break;
165 	}
166 	read_unlock(&binfmt_lock);
167 exit:
168 	fput(file);
169 out:
170   	return error;
171 }
172 #endif /* #ifdef CONFIG_USELIB */
173 
174 #ifdef CONFIG_MMU
175 /*
176  * The nascent bprm->mm is not visible until exec_mmap(), but it can
177  * use a lot of memory, so account these pages in current->mm temporarily
178  * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
179  * change the counter back via acct_arg_size(0).
180  */
181 static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
182 {
183 	struct mm_struct *mm = current->mm;
184 	long diff = (long)(pages - bprm->vma_pages);
185 
186 	if (!mm || !diff)
187 		return;
188 
189 	bprm->vma_pages = pages;
190 	add_mm_counter(mm, MM_ANONPAGES, diff);
191 }
192 
193 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
194 		int write)
195 {
196 	struct page *page;
197 	int ret;
198 	unsigned int gup_flags = FOLL_FORCE;
199 
200 #ifdef CONFIG_STACK_GROWSUP
201 	if (write) {
202 		ret = expand_downwards(bprm->vma, pos);
203 		if (ret < 0)
204 			return NULL;
205 	}
206 #endif
207 
208 	if (write)
209 		gup_flags |= FOLL_WRITE;
210 
211 	/*
212 	 * We are doing an exec().  'current' is the process
213 	 * doing the exec and bprm->mm is the new process's mm.
214 	 */
215 	ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
216 			&page, NULL, NULL);
217 	if (ret <= 0)
218 		return NULL;
219 
220 	if (write) {
221 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
222 		struct rlimit *rlim;
223 
224 		acct_arg_size(bprm, size / PAGE_SIZE);
225 
226 		/*
227 		 * We've historically supported up to 32 pages (ARG_MAX)
228 		 * of argument strings even with small stacks
229 		 */
230 		if (size <= ARG_MAX)
231 			return page;
232 
233 		/*
234 		 * Limit to 1/4-th the stack size for the argv+env strings.
235 		 * This ensures that:
236 		 *  - the remaining binfmt code will not run out of stack space,
237 		 *  - the program will have a reasonable amount of stack left
238 		 *    to work from.
239 		 */
240 		rlim = current->signal->rlim;
241 		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
242 			put_page(page);
243 			return NULL;
244 		}
245 	}
246 
247 	return page;
248 }
249 
250 static void put_arg_page(struct page *page)
251 {
252 	put_page(page);
253 }
254 
255 static void free_arg_pages(struct linux_binprm *bprm)
256 {
257 }
258 
259 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
260 		struct page *page)
261 {
262 	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
263 }
264 
265 static int __bprm_mm_init(struct linux_binprm *bprm)
266 {
267 	int err;
268 	struct vm_area_struct *vma = NULL;
269 	struct mm_struct *mm = bprm->mm;
270 
271 	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
272 	if (!vma)
273 		return -ENOMEM;
274 
275 	if (down_write_killable(&mm->mmap_sem)) {
276 		err = -EINTR;
277 		goto err_free;
278 	}
279 	vma->vm_mm = mm;
280 
281 	/*
282 	 * Place the stack at the largest stack address the architecture
283 	 * supports. Later, we'll move this to an appropriate place. We don't
284 	 * use STACK_TOP because that can depend on attributes which aren't
285 	 * configured yet.
286 	 */
287 	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
288 	vma->vm_end = STACK_TOP_MAX;
289 	vma->vm_start = vma->vm_end - PAGE_SIZE;
290 	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
291 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
292 	INIT_LIST_HEAD(&vma->anon_vma_chain);
293 
294 	err = insert_vm_struct(mm, vma);
295 	if (err)
296 		goto err;
297 
298 	mm->stack_vm = mm->total_vm = 1;
299 	arch_bprm_mm_init(mm, vma);
300 	up_write(&mm->mmap_sem);
301 	bprm->p = vma->vm_end - sizeof(void *);
302 	return 0;
303 err:
304 	up_write(&mm->mmap_sem);
305 err_free:
306 	bprm->vma = NULL;
307 	kmem_cache_free(vm_area_cachep, vma);
308 	return err;
309 }
310 
311 static bool valid_arg_len(struct linux_binprm *bprm, long len)
312 {
313 	return len <= MAX_ARG_STRLEN;
314 }
315 
316 #else
317 
318 static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
319 {
320 }
321 
322 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
323 		int write)
324 {
325 	struct page *page;
326 
327 	page = bprm->page[pos / PAGE_SIZE];
328 	if (!page && write) {
329 		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
330 		if (!page)
331 			return NULL;
332 		bprm->page[pos / PAGE_SIZE] = page;
333 	}
334 
335 	return page;
336 }
337 
338 static void put_arg_page(struct page *page)
339 {
340 }
341 
342 static void free_arg_page(struct linux_binprm *bprm, int i)
343 {
344 	if (bprm->page[i]) {
345 		__free_page(bprm->page[i]);
346 		bprm->page[i] = NULL;
347 	}
348 }
349 
350 static void free_arg_pages(struct linux_binprm *bprm)
351 {
352 	int i;
353 
354 	for (i = 0; i < MAX_ARG_PAGES; i++)
355 		free_arg_page(bprm, i);
356 }
357 
358 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
359 		struct page *page)
360 {
361 }
362 
363 static int __bprm_mm_init(struct linux_binprm *bprm)
364 {
365 	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
366 	return 0;
367 }
368 
369 static bool valid_arg_len(struct linux_binprm *bprm, long len)
370 {
371 	return len <= bprm->p;
372 }
373 
374 #endif /* CONFIG_MMU */
375 
376 /*
377  * Create a new mm_struct and populate it with a temporary stack
378  * vm_area_struct.  We don't have enough context at this point to set the stack
379  * flags, permissions, and offset, so we use temporary values.  We'll update
380  * them later in setup_arg_pages().
381  */
382 static int bprm_mm_init(struct linux_binprm *bprm)
383 {
384 	int err;
385 	struct mm_struct *mm = NULL;
386 
387 	bprm->mm = mm = mm_alloc();
388 	err = -ENOMEM;
389 	if (!mm)
390 		goto err;
391 
392 	err = __bprm_mm_init(bprm);
393 	if (err)
394 		goto err;
395 
396 	return 0;
397 
398 err:
399 	if (mm) {
400 		bprm->mm = NULL;
401 		mmdrop(mm);
402 	}
403 
404 	return err;
405 }
406 
407 struct user_arg_ptr {
408 #ifdef CONFIG_COMPAT
409 	bool is_compat;
410 #endif
411 	union {
412 		const char __user *const __user *native;
413 #ifdef CONFIG_COMPAT
414 		const compat_uptr_t __user *compat;
415 #endif
416 	} ptr;
417 };
418 
419 static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
420 {
421 	const char __user *native;
422 
423 #ifdef CONFIG_COMPAT
424 	if (unlikely(argv.is_compat)) {
425 		compat_uptr_t compat;
426 
427 		if (get_user(compat, argv.ptr.compat + nr))
428 			return ERR_PTR(-EFAULT);
429 
430 		return compat_ptr(compat);
431 	}
432 #endif
433 
434 	if (get_user(native, argv.ptr.native + nr))
435 		return ERR_PTR(-EFAULT);
436 
437 	return native;
438 }
439 
440 /*
441  * count() counts the number of strings in array ARGV.
442  */
443 static int count(struct user_arg_ptr argv, int max)
444 {
445 	int i = 0;
446 
447 	if (argv.ptr.native != NULL) {
448 		for (;;) {
449 			const char __user *p = get_user_arg_ptr(argv, i);
450 
451 			if (!p)
452 				break;
453 
454 			if (IS_ERR(p))
455 				return -EFAULT;
456 
457 			if (i >= max)
458 				return -E2BIG;
459 			++i;
460 
461 			if (fatal_signal_pending(current))
462 				return -ERESTARTNOHAND;
463 			cond_resched();
464 		}
465 	}
466 	return i;
467 }
468 
469 /*
470  * 'copy_strings()' copies argument/environment strings from the old
471  * process's memory to the new process's stack.  The call to get_user_pages()
472  * ensures the destination page is created and not swapped out.
473  */
474 static int copy_strings(int argc, struct user_arg_ptr argv,
475 			struct linux_binprm *bprm)
476 {
477 	struct page *kmapped_page = NULL;
478 	char *kaddr = NULL;
479 	unsigned long kpos = 0;
480 	int ret;
481 
482 	while (argc-- > 0) {
483 		const char __user *str;
484 		int len;
485 		unsigned long pos;
486 
487 		ret = -EFAULT;
488 		str = get_user_arg_ptr(argv, argc);
489 		if (IS_ERR(str))
490 			goto out;
491 
492 		len = strnlen_user(str, MAX_ARG_STRLEN);
493 		if (!len)
494 			goto out;
495 
496 		ret = -E2BIG;
497 		if (!valid_arg_len(bprm, len))
498 			goto out;
499 
500 		/* We're going to work our way backwards. */
501 		pos = bprm->p;
502 		str += len;
503 		bprm->p -= len;
504 
505 		while (len > 0) {
506 			int offset, bytes_to_copy;
507 
508 			if (fatal_signal_pending(current)) {
509 				ret = -ERESTARTNOHAND;
510 				goto out;
511 			}
512 			cond_resched();
513 
514 			offset = pos % PAGE_SIZE;
515 			if (offset == 0)
516 				offset = PAGE_SIZE;
517 
518 			bytes_to_copy = offset;
519 			if (bytes_to_copy > len)
520 				bytes_to_copy = len;
521 
522 			offset -= bytes_to_copy;
523 			pos -= bytes_to_copy;
524 			str -= bytes_to_copy;
525 			len -= bytes_to_copy;
526 
527 			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
528 				struct page *page;
529 
530 				page = get_arg_page(bprm, pos, 1);
531 				if (!page) {
532 					ret = -E2BIG;
533 					goto out;
534 				}
535 
536 				if (kmapped_page) {
537 					flush_kernel_dcache_page(kmapped_page);
538 					kunmap(kmapped_page);
539 					put_arg_page(kmapped_page);
540 				}
541 				kmapped_page = page;
542 				kaddr = kmap(kmapped_page);
543 				kpos = pos & PAGE_MASK;
544 				flush_arg_page(bprm, kpos, kmapped_page);
545 			}
546 			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
547 				ret = -EFAULT;
548 				goto out;
549 			}
550 		}
551 	}
552 	ret = 0;
553 out:
554 	if (kmapped_page) {
555 		flush_kernel_dcache_page(kmapped_page);
556 		kunmap(kmapped_page);
557 		put_arg_page(kmapped_page);
558 	}
559 	return ret;
560 }
561 
562 /*
563  * Like copy_strings, but gets argv and its values from kernel memory.
564  */
565 int copy_strings_kernel(int argc, const char *const *__argv,
566 			struct linux_binprm *bprm)
567 {
568 	int r;
569 	mm_segment_t oldfs = get_fs();
570 	struct user_arg_ptr argv = {
571 		.ptr.native = (const char __user *const  __user *)__argv,
572 	};
573 
574 	set_fs(KERNEL_DS);
575 	r = copy_strings(argc, argv, bprm);
576 	set_fs(oldfs);
577 
578 	return r;
579 }
580 EXPORT_SYMBOL(copy_strings_kernel);
581 
582 #ifdef CONFIG_MMU
583 
584 /*
585  * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
586  * the binfmt code determines where the new stack should reside, we shift it to
587  * its final location.  The process proceeds as follows:
588  *
589  * 1) Use shift to calculate the new vma endpoints.
590  * 2) Extend vma to cover both the old and new ranges.  This ensures the
591  *    arguments passed to subsequent functions are consistent.
592  * 3) Move vma's page tables to the new range.
593  * 4) Free up any cleared pgd range.
594  * 5) Shrink the vma to cover only the new range.
595  */
596 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
597 {
598 	struct mm_struct *mm = vma->vm_mm;
599 	unsigned long old_start = vma->vm_start;
600 	unsigned long old_end = vma->vm_end;
601 	unsigned long length = old_end - old_start;
602 	unsigned long new_start = old_start - shift;
603 	unsigned long new_end = old_end - shift;
604 	struct mmu_gather tlb;
605 
606 	BUG_ON(new_start > new_end);
607 
608 	/*
609 	 * ensure there are no vmas between where we want to go
610 	 * and where we are
611 	 */
612 	if (vma != find_vma(mm, new_start))
613 		return -EFAULT;
614 
615 	/*
616 	 * cover the whole range: [new_start, old_end)
617 	 */
618 	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
619 		return -ENOMEM;
620 
621 	/*
622 	 * move the page tables downwards, on failure we rely on
623 	 * process cleanup to remove whatever mess we made.
624 	 */
625 	if (length != move_page_tables(vma, old_start,
626 				       vma, new_start, length, false))
627 		return -ENOMEM;
628 
629 	lru_add_drain();
630 	tlb_gather_mmu(&tlb, mm, old_start, old_end);
631 	if (new_end > old_start) {
632 		/*
633 		 * when the old and new regions overlap clear from new_end.
634 		 */
635 		free_pgd_range(&tlb, new_end, old_end, new_end,
636 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
637 	} else {
638 		/*
639 		 * otherwise, clean from old_start; this is done to not touch
640 		 * the address space in [new_end, old_start), because some
641 		 * architectures have constraints on va-space that make this
642 		 * illegal (IA64) - for the others it's just a little faster.
643 		 */
644 		free_pgd_range(&tlb, old_start, old_end, new_end,
645 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
646 	}
647 	tlb_finish_mmu(&tlb, old_start, old_end);
648 
649 	/*
650 	 * Shrink the vma to just the new range.  Always succeeds.
651 	 */
652 	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
653 
654 	return 0;
655 }
656 
657 /*
658  * Finalizes the stack vm_area_struct. The flags and permissions are updated,
659  * the stack is optionally relocated, and some extra space is added.
660  */
661 int setup_arg_pages(struct linux_binprm *bprm,
662 		    unsigned long stack_top,
663 		    int executable_stack)
664 {
665 	unsigned long ret;
666 	unsigned long stack_shift;
667 	struct mm_struct *mm = current->mm;
668 	struct vm_area_struct *vma = bprm->vma;
669 	struct vm_area_struct *prev = NULL;
670 	unsigned long vm_flags;
671 	unsigned long stack_base;
672 	unsigned long stack_size;
673 	unsigned long stack_expand;
674 	unsigned long rlim_stack;
675 
676 #ifdef CONFIG_STACK_GROWSUP
677 	/* Limit stack size */
678 	stack_base = rlimit_max(RLIMIT_STACK);
679 	if (stack_base > STACK_SIZE_MAX)
680 		stack_base = STACK_SIZE_MAX;
681 
682 	/* Add space for stack randomization. */
683 	stack_base += (STACK_RND_MASK << PAGE_SHIFT);
684 
685 	/* Make sure we didn't let the argument array grow too large. */
686 	if (vma->vm_end - vma->vm_start > stack_base)
687 		return -ENOMEM;
688 
689 	stack_base = PAGE_ALIGN(stack_top - stack_base);
690 
691 	stack_shift = vma->vm_start - stack_base;
692 	mm->arg_start = bprm->p - stack_shift;
693 	bprm->p = vma->vm_end - stack_shift;
694 #else
695 	stack_top = arch_align_stack(stack_top);
696 	stack_top = PAGE_ALIGN(stack_top);
697 
698 	if (unlikely(stack_top < mmap_min_addr) ||
699 	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
700 		return -ENOMEM;
701 
702 	stack_shift = vma->vm_end - stack_top;
703 
704 	bprm->p -= stack_shift;
705 	mm->arg_start = bprm->p;
706 #endif
707 
708 	if (bprm->loader)
709 		bprm->loader -= stack_shift;
710 	bprm->exec -= stack_shift;
711 
712 	if (down_write_killable(&mm->mmap_sem))
713 		return -EINTR;
714 
715 	vm_flags = VM_STACK_FLAGS;
716 
717 	/*
718 	 * Adjust stack execute permissions; explicitly enable for
719 	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
720 	 * (arch default) otherwise.
721 	 */
722 	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
723 		vm_flags |= VM_EXEC;
724 	else if (executable_stack == EXSTACK_DISABLE_X)
725 		vm_flags &= ~VM_EXEC;
726 	vm_flags |= mm->def_flags;
727 	vm_flags |= VM_STACK_INCOMPLETE_SETUP;
728 
729 	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
730 			vm_flags);
731 	if (ret)
732 		goto out_unlock;
733 	BUG_ON(prev != vma);
734 
735 	/* Move stack pages down in memory. */
736 	if (stack_shift) {
737 		ret = shift_arg_pages(vma, stack_shift);
738 		if (ret)
739 			goto out_unlock;
740 	}
741 
742 	/* mprotect_fixup is overkill to remove the temporary stack flags */
743 	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
744 
745 	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
746 	stack_size = vma->vm_end - vma->vm_start;
747 	/*
748 	 * Align this down to a page boundary as expand_stack
749 	 * will align it up.
750 	 */
751 	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
752 #ifdef CONFIG_STACK_GROWSUP
753 	if (stack_size + stack_expand > rlim_stack)
754 		stack_base = vma->vm_start + rlim_stack;
755 	else
756 		stack_base = vma->vm_end + stack_expand;
757 #else
758 	if (stack_size + stack_expand > rlim_stack)
759 		stack_base = vma->vm_end - rlim_stack;
760 	else
761 		stack_base = vma->vm_start - stack_expand;
762 #endif
763 	current->mm->start_stack = bprm->p;
764 	ret = expand_stack(vma, stack_base);
765 	if (ret)
766 		ret = -EFAULT;
767 
768 out_unlock:
769 	up_write(&mm->mmap_sem);
770 	return ret;
771 }
772 EXPORT_SYMBOL(setup_arg_pages);
773 
774 #else
775 
776 /*
777  * Transfer the program arguments and environment from the holding pages
778  * onto the stack. The provided stack pointer is adjusted accordingly.
779  */
780 int transfer_args_to_stack(struct linux_binprm *bprm,
781 			   unsigned long *sp_location)
782 {
783 	unsigned long index, stop, sp;
784 	int ret = 0;
785 
786 	stop = bprm->p >> PAGE_SHIFT;
787 	sp = *sp_location;
788 
789 	for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
790 		unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
791 		char *src = kmap(bprm->page[index]) + offset;
792 		sp -= PAGE_SIZE - offset;
793 		if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
794 			ret = -EFAULT;
795 		kunmap(bprm->page[index]);
796 		if (ret)
797 			goto out;
798 	}
799 
800 	*sp_location = sp;
801 
802 out:
803 	return ret;
804 }
805 EXPORT_SYMBOL(transfer_args_to_stack);
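
/*
 * Only built for the no-MMU case; the no-MMU binary loaders (e.g.
 * fs/binfmt_flat.c) call this once they know where the new stack lives.
 */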
806 
807 #endif /* CONFIG_MMU */
808 
809 static struct file *do_open_execat(int fd, struct filename *name, int flags)
810 {
811 	struct file *file;
812 	int err;
813 	struct open_flags open_exec_flags = {
814 		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
815 		.acc_mode = MAY_EXEC,
816 		.intent = LOOKUP_OPEN,
817 		.lookup_flags = LOOKUP_FOLLOW,
818 	};
819 
820 	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
821 		return ERR_PTR(-EINVAL);
822 	if (flags & AT_SYMLINK_NOFOLLOW)
823 		open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
824 	if (flags & AT_EMPTY_PATH)
825 		open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
826 
827 	file = do_filp_open(fd, name, &open_exec_flags);
828 	if (IS_ERR(file))
829 		goto out;
830 
831 	err = -EACCES;
832 	if (!S_ISREG(file_inode(file)->i_mode))
833 		goto exit;
834 
835 	if (path_noexec(&file->f_path))
836 		goto exit;
837 
838 	err = deny_write_access(file);
839 	if (err)
840 		goto exit;
841 
842 	if (name->name[0] != '\0')
843 		fsnotify_open(file);
844 
845 out:
846 	return file;
847 
848 exit:
849 	fput(file);
850 	return ERR_PTR(err);
851 }
852 
853 struct file *open_exec(const char *name)
854 {
855 	struct filename *filename = getname_kernel(name);
856 	struct file *f = ERR_CAST(filename);
857 
858 	if (!IS_ERR(filename)) {
859 		f = do_open_execat(AT_FDCWD, filename, 0);
860 		putname(filename);
861 	}
862 	return f;
863 }
864 EXPORT_SYMBOL(open_exec);
865 
866 int kernel_read(struct file *file, loff_t offset,
867 		char *addr, unsigned long count)
868 {
869 	mm_segment_t old_fs;
870 	loff_t pos = offset;
871 	int result;
872 
873 	old_fs = get_fs();
874 	set_fs(get_ds());
875 	/* The cast to a user pointer is valid due to the set_fs() */
876 	result = vfs_read(file, (void __user *)addr, count, &pos);
877 	set_fs(old_fs);
878 	return result;
879 }
880 
881 EXPORT_SYMBOL(kernel_read);
882 
883 int kernel_read_file(struct file *file, void **buf, loff_t *size,
884 		     loff_t max_size, enum kernel_read_file_id id)
885 {
886 	loff_t i_size, pos;
887 	ssize_t bytes = 0;
888 	int ret;
889 
890 	if (!S_ISREG(file_inode(file)->i_mode) || max_size < 0)
891 		return -EINVAL;
892 
893 	ret = security_kernel_read_file(file, id);
894 	if (ret)
895 		return ret;
896 
897 	ret = deny_write_access(file);
898 	if (ret)
899 		return ret;
900 
901 	i_size = i_size_read(file_inode(file));
902 	if (max_size > 0 && i_size > max_size) {
903 		ret = -EFBIG;
904 		goto out;
905 	}
906 	if (i_size <= 0) {
907 		ret = -EINVAL;
908 		goto out;
909 	}
910 
911 	if (id != READING_FIRMWARE_PREALLOC_BUFFER)
912 		*buf = vmalloc(i_size);
913 	if (!*buf) {
914 		ret = -ENOMEM;
915 		goto out;
916 	}
917 
918 	pos = 0;
919 	while (pos < i_size) {
920 		bytes = kernel_read(file, pos, (char *)(*buf) + pos,
921 				    i_size - pos);
922 		if (bytes < 0) {
923 			ret = bytes;
924 			goto out;
925 		}
926 
927 		if (bytes == 0)
928 			break;
929 		pos += bytes;
930 	}
931 
932 	if (pos != i_size) {
933 		ret = -EIO;
934 		goto out_free;
935 	}
936 
937 	ret = security_kernel_post_read_file(file, *buf, i_size, id);
938 	if (!ret)
939 		*size = pos;
940 
941 out_free:
942 	if (ret < 0) {
943 		if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
944 			vfree(*buf);
945 			*buf = NULL;
946 		}
947 	}
948 
949 out:
950 	allow_write_access(file);
951 	return ret;
952 }
953 EXPORT_SYMBOL_GPL(kernel_read_file);
954 
955 int kernel_read_file_from_path(char *path, void **buf, loff_t *size,
956 			       loff_t max_size, enum kernel_read_file_id id)
957 {
958 	struct file *file;
959 	int ret;
960 
961 	if (!path || !*path)
962 		return -EINVAL;
963 
964 	file = filp_open(path, O_RDONLY, 0);
965 	if (IS_ERR(file))
966 		return PTR_ERR(file);
967 
968 	ret = kernel_read_file(file, buf, size, max_size, id);
969 	fput(file);
970 	return ret;
971 }
972 EXPORT_SYMBOL_GPL(kernel_read_file_from_path);
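
/*
 * Caller-side sketch (the path and size limit below are made up): on success
 * the buffer is vmalloc()ed by kernel_read_file() and the caller must vfree()
 * it when done:
 *
 *	void *buf = NULL;
 *	loff_t size;
 *	int err = kernel_read_file_from_path("/lib/firmware/example.bin",
 *					     &buf, &size, INT_MAX,
 *					     READING_FIRMWARE);
 *	if (!err) {
 *		... consume size bytes at buf ...
 *		vfree(buf);
 *	}
 */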
973 
974 int kernel_read_file_from_fd(int fd, void **buf, loff_t *size, loff_t max_size,
975 			     enum kernel_read_file_id id)
976 {
977 	struct fd f = fdget(fd);
978 	int ret = -EBADF;
979 
980 	if (!f.file)
981 		goto out;
982 
983 	ret = kernel_read_file(f.file, buf, size, max_size, id);
984 out:
985 	fdput(f);
986 	return ret;
987 }
988 EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
989 
990 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
991 {
992 	ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
993 	if (res > 0)
994 		flush_icache_range(addr, addr + len);
995 	return res;
996 }
997 EXPORT_SYMBOL(read_code);
998 
999 static int exec_mmap(struct mm_struct *mm)
1000 {
1001 	struct task_struct *tsk;
1002 	struct mm_struct *old_mm, *active_mm;
1003 
1004 	/* Notify parent that we're no longer interested in the old VM */
1005 	tsk = current;
1006 	old_mm = current->mm;
1007 	mm_release(tsk, old_mm);
1008 
1009 	if (old_mm) {
1010 		sync_mm_rss(old_mm);
1011 		/*
1012 		 * Make sure that if there is a core dump in progress
1013 		 * for the old mm, we get out and die instead of going
1014 		 * through with the exec.  We must hold mmap_sem around
1015 		 * checking core_state and changing tsk->mm.
1016 		 */
1017 		down_read(&old_mm->mmap_sem);
1018 		if (unlikely(old_mm->core_state)) {
1019 			up_read(&old_mm->mmap_sem);
1020 			return -EINTR;
1021 		}
1022 	}
1023 	task_lock(tsk);
1024 	active_mm = tsk->active_mm;
1025 	tsk->mm = mm;
1026 	tsk->active_mm = mm;
1027 	activate_mm(active_mm, mm);
1028 	tsk->mm->vmacache_seqnum = 0;
1029 	vmacache_flush(tsk);
1030 	task_unlock(tsk);
1031 	if (old_mm) {
1032 		up_read(&old_mm->mmap_sem);
1033 		BUG_ON(active_mm != old_mm);
1034 		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
1035 		mm_update_next_owner(old_mm);
1036 		mmput(old_mm);
1037 		return 0;
1038 	}
1039 	mmdrop(active_mm);
1040 	return 0;
1041 }
1042 
1043 /*
1044  * This function makes sure the current process has its own signal table,
1045  * so that flush_signal_handlers can later reset the handlers without
1046  * disturbing other processes.  (Other processes might share the signal
1047  * table via the CLONE_SIGHAND option to clone().)
1048  */
1049 static int de_thread(struct task_struct *tsk)
1050 {
1051 	struct signal_struct *sig = tsk->signal;
1052 	struct sighand_struct *oldsighand = tsk->sighand;
1053 	spinlock_t *lock = &oldsighand->siglock;
1054 
1055 	if (thread_group_empty(tsk))
1056 		goto no_thread_group;
1057 
1058 	/*
1059 	 * Kill all other threads in the thread group.
1060 	 */
1061 	spin_lock_irq(lock);
1062 	if (signal_group_exit(sig)) {
1063 		/*
1064 		 * Another group action in progress, just
1065 		 * return so that the signal is processed.
1066 		 */
1067 		spin_unlock_irq(lock);
1068 		return -EAGAIN;
1069 	}
1070 
1071 	sig->group_exit_task = tsk;
1072 	sig->notify_count = zap_other_threads(tsk);
1073 	if (!thread_group_leader(tsk))
1074 		sig->notify_count--;
1075 
1076 	while (sig->notify_count) {
1077 		__set_current_state(TASK_KILLABLE);
1078 		spin_unlock_irq(lock);
1079 		schedule();
1080 		if (unlikely(__fatal_signal_pending(tsk)))
1081 			goto killed;
1082 		spin_lock_irq(lock);
1083 	}
1084 	spin_unlock_irq(lock);
1085 
1086 	/*
1087 	 * At this point all other threads have exited, all we have to
1088 	 * do is to wait for the thread group leader to become inactive,
1089 	 * and to assume its PID:
1090 	 */
1091 	if (!thread_group_leader(tsk)) {
1092 		struct task_struct *leader = tsk->group_leader;
1093 
1094 		for (;;) {
1095 			cgroup_threadgroup_change_begin(tsk);
1096 			write_lock_irq(&tasklist_lock);
1097 			/*
1098 			 * Do this under tasklist_lock to ensure that
1099 			 * exit_notify() can't miss ->group_exit_task
1100 			 */
1101 			sig->notify_count = -1;
1102 			if (likely(leader->exit_state))
1103 				break;
1104 			__set_current_state(TASK_KILLABLE);
1105 			write_unlock_irq(&tasklist_lock);
1106 			cgroup_threadgroup_change_end(tsk);
1107 			schedule();
1108 			if (unlikely(__fatal_signal_pending(tsk)))
1109 				goto killed;
1110 		}
1111 
1112 		/*
1113 		 * The only record we have of the real-time age of a
1114 		 * process, regardless of execs it's done, is start_time.
1115 		 * All the past CPU time is accumulated in signal_struct
1116 		 * from sister threads now dead.  But in this non-leader
1117 		 * exec, nothing survives from the original leader thread,
1118 		 * whose birth marks the true age of this process now.
1119 		 * When we take on its identity by switching to its PID, we
1120 		 * also take its birthdate (always earlier than our own).
1121 		 */
1122 		tsk->start_time = leader->start_time;
1123 		tsk->real_start_time = leader->real_start_time;
1124 
1125 		BUG_ON(!same_thread_group(leader, tsk));
1126 		BUG_ON(has_group_leader_pid(tsk));
1127 		/*
1128 		 * An exec() starts a new thread group with the
1129 		 * TGID of the previous thread group. Rehash the
1130 		 * two threads with a switched PID, and release
1131 		 * the former thread group leader:
1132 		 */
1133 
1134 		/* Become a process group leader with the old leader's pid.
1135 		 * The old leader becomes a thread of this thread group.
1136 		 * Note: The old leader also uses this pid until release_task
1137 		 *       is called.  Odd but simple and correct.
1138 		 */
1139 		tsk->pid = leader->pid;
1140 		change_pid(tsk, PIDTYPE_PID, task_pid(leader));
1141 		transfer_pid(leader, tsk, PIDTYPE_PGID);
1142 		transfer_pid(leader, tsk, PIDTYPE_SID);
1143 
1144 		list_replace_rcu(&leader->tasks, &tsk->tasks);
1145 		list_replace_init(&leader->sibling, &tsk->sibling);
1146 
1147 		tsk->group_leader = tsk;
1148 		leader->group_leader = tsk;
1149 
1150 		tsk->exit_signal = SIGCHLD;
1151 		leader->exit_signal = -1;
1152 
1153 		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
1154 		leader->exit_state = EXIT_DEAD;
1155 
1156 		/*
1157 		 * We are going to release_task()->ptrace_unlink() silently,
1158 		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
1159 		 * the tracer won't block again waiting for this thread.
1160 		 */
1161 		if (unlikely(leader->ptrace))
1162 			__wake_up_parent(leader, leader->parent);
1163 		write_unlock_irq(&tasklist_lock);
1164 		cgroup_threadgroup_change_end(tsk);
1165 
1166 		release_task(leader);
1167 	}
1168 
1169 	sig->group_exit_task = NULL;
1170 	sig->notify_count = 0;
1171 
1172 no_thread_group:
1173 	/* we have changed execution domain */
1174 	tsk->exit_signal = SIGCHLD;
1175 
1176 #ifdef CONFIG_POSIX_TIMERS
1177 	exit_itimers(sig);
1178 	flush_itimer_signals();
1179 #endif
1180 
1181 	if (atomic_read(&oldsighand->count) != 1) {
1182 		struct sighand_struct *newsighand;
1183 		/*
1184 		 * This ->sighand is shared with the CLONE_SIGHAND
1185 		 * but not CLONE_THREAD task, switch to the new one.
1186 		 */
1187 		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1188 		if (!newsighand)
1189 			return -ENOMEM;
1190 
1191 		atomic_set(&newsighand->count, 1);
1192 		memcpy(newsighand->action, oldsighand->action,
1193 		       sizeof(newsighand->action));
1194 
1195 		write_lock_irq(&tasklist_lock);
1196 		spin_lock(&oldsighand->siglock);
1197 		rcu_assign_pointer(tsk->sighand, newsighand);
1198 		spin_unlock(&oldsighand->siglock);
1199 		write_unlock_irq(&tasklist_lock);
1200 
1201 		__cleanup_sighand(oldsighand);
1202 	}
1203 
1204 	BUG_ON(!thread_group_leader(tsk));
1205 	return 0;
1206 
1207 killed:
1208 	/* protects against exit_notify() and __exit_signal() */
1209 	read_lock(&tasklist_lock);
1210 	sig->group_exit_task = NULL;
1211 	sig->notify_count = 0;
1212 	read_unlock(&tasklist_lock);
1213 	return -EAGAIN;
1214 }
1215 
1216 char *get_task_comm(char *buf, struct task_struct *tsk)
1217 {
1218 	/* buf must be at least sizeof(tsk->comm) in size */
1219 	task_lock(tsk);
1220 	strncpy(buf, tsk->comm, sizeof(tsk->comm));
1221 	task_unlock(tsk);
1222 	return buf;
1223 }
1224 EXPORT_SYMBOL_GPL(get_task_comm);
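
/*
 * Typical call pattern (sketch): callers supply a TASK_COMM_LEN sized buffer,
 * e.g.
 *
 *	char comm[TASK_COMM_LEN];
 *	get_task_comm(comm, task);
 */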
1225 
1226 /*
1227  * These functions flush out all traces of the currently running executable
1228  * so that a new one can be started.
1229  */
1230 
1231 void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
1232 {
1233 	task_lock(tsk);
1234 	trace_task_rename(tsk, buf);
1235 	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1236 	task_unlock(tsk);
1237 	perf_event_comm(tsk, exec);
1238 }
1239 
1240 int flush_old_exec(struct linux_binprm * bprm)
1241 {
1242 	int retval;
1243 
1244 	/*
1245 	 * Make sure we have a private signal table and that
1246 	 * we are unassociated from the previous thread group.
1247 	 */
1248 	retval = de_thread(current);
1249 	if (retval)
1250 		goto out;
1251 
1252 	/*
1253 	 * Must be called _before_ exec_mmap() as bprm->mm is
1254 	 * not visible until then. This also enables the update
1255 	 * to be lockless.
1256 	 */
1257 	set_mm_exe_file(bprm->mm, bprm->file);
1258 
1259 	/*
1260 	 * Release all of the old mmap stuff
1261 	 */
1262 	acct_arg_size(bprm, 0);
1263 	retval = exec_mmap(bprm->mm);
1264 	if (retval)
1265 		goto out;
1266 
1267 	bprm->mm = NULL;		/* We're using it now */
1268 
1269 	set_fs(USER_DS);
1270 	current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
1271 					PF_NOFREEZE | PF_NO_SETAFFINITY);
1272 	flush_thread();
1273 	current->personality &= ~bprm->per_clear;
1274 
1275 	/*
1276 	 * We have to apply CLOEXEC before we change whether the process is
1277 	 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
1278 	 * trying to access the should-be-closed file descriptors of a process
1279 	 * undergoing exec(2).
1280 	 */
1281 	do_close_on_exec(current->files);
1282 	return 0;
1283 
1284 out:
1285 	return retval;
1286 }
1287 EXPORT_SYMBOL(flush_old_exec);
1288 
1289 void would_dump(struct linux_binprm *bprm, struct file *file)
1290 {
1291 	struct inode *inode = file_inode(file);
1292 	if (inode_permission(inode, MAY_READ) < 0) {
1293 		struct user_namespace *old, *user_ns;
1294 		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1295 
1296 		/* Ensure mm->user_ns contains the executable */
1297 		user_ns = old = bprm->mm->user_ns;
1298 		while ((user_ns != &init_user_ns) &&
1299 		       !privileged_wrt_inode_uidgid(user_ns, inode))
1300 			user_ns = user_ns->parent;
1301 
1302 		if (old != user_ns) {
1303 			bprm->mm->user_ns = get_user_ns(user_ns);
1304 			put_user_ns(old);
1305 		}
1306 	}
1307 }
1308 EXPORT_SYMBOL(would_dump);
1309 
1310 void setup_new_exec(struct linux_binprm * bprm)
1311 {
1312 	arch_pick_mmap_layout(current->mm);
1313 
1314 	/* This is the point of no return */
1315 	current->sas_ss_sp = current->sas_ss_size = 0;
1316 
1317 	if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
1318 		set_dumpable(current->mm, SUID_DUMP_USER);
1319 	else
1320 		set_dumpable(current->mm, suid_dumpable);
1321 
1322 	perf_event_exec();
1323 	__set_task_comm(current, kbasename(bprm->filename), true);
1324 
1325 	/* Set the new mm task size. We have to do that late because it may
1326 	 * depend on TIF_32BIT which is only updated in flush_thread() on
1327 	 * some architectures like powerpc
1328 	 */
1329 	current->mm->task_size = TASK_SIZE;
1330 
1331 	/* install the new credentials */
1332 	if (!uid_eq(bprm->cred->uid, current_euid()) ||
1333 	    !gid_eq(bprm->cred->gid, current_egid())) {
1334 		current->pdeath_signal = 0;
1335 	} else {
1336 		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
1337 			set_dumpable(current->mm, suid_dumpable);
1338 	}
1339 
1340 	/* An exec changes our domain. We are no longer part of the thread
1341 	   group */
1342 	current->self_exec_id++;
1343 	flush_signal_handlers(current, 0);
1344 }
1345 EXPORT_SYMBOL(setup_new_exec);
1346 
1347 /*
1348  * Prepare credentials and lock ->cred_guard_mutex.
1349  * install_exec_creds() commits the new creds and drops the lock.
1350  * Or, if exec fails before, free_bprm() should release ->cred
1351  * and unlock.
1352  */
1353 int prepare_bprm_creds(struct linux_binprm *bprm)
1354 {
1355 	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1356 		return -ERESTARTNOINTR;
1357 
1358 	bprm->cred = prepare_exec_creds();
1359 	if (likely(bprm->cred))
1360 		return 0;
1361 
1362 	mutex_unlock(&current->signal->cred_guard_mutex);
1363 	return -ENOMEM;
1364 }
1365 
1366 static void free_bprm(struct linux_binprm *bprm)
1367 {
1368 	free_arg_pages(bprm);
1369 	if (bprm->cred) {
1370 		mutex_unlock(&current->signal->cred_guard_mutex);
1371 		abort_creds(bprm->cred);
1372 	}
1373 	if (bprm->file) {
1374 		allow_write_access(bprm->file);
1375 		fput(bprm->file);
1376 	}
1377 	/* If a binfmt changed the interp, free it. */
1378 	if (bprm->interp != bprm->filename)
1379 		kfree(bprm->interp);
1380 	kfree(bprm);
1381 }
1382 
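/*
 * Replace bprm->interp with a kmalloc()ed copy of @interp.  Interpreter
 * chaining formats (binfmt_script, binfmt_misc) use this to point the interp
 * at the program that will actually be loaded before re-running
 * prepare_binprm() and search_binary_handler().
 */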
1383 int bprm_change_interp(char *interp, struct linux_binprm *bprm)
1384 {
1385 	/* If a binfmt changed the interp, free it first. */
1386 	if (bprm->interp != bprm->filename)
1387 		kfree(bprm->interp);
1388 	bprm->interp = kstrdup(interp, GFP_KERNEL);
1389 	if (!bprm->interp)
1390 		return -ENOMEM;
1391 	return 0;
1392 }
1393 EXPORT_SYMBOL(bprm_change_interp);
1394 
1395 /*
1396  * install the new credentials for this executable
1397  */
1398 void install_exec_creds(struct linux_binprm *bprm)
1399 {
1400 	security_bprm_committing_creds(bprm);
1401 
1402 	commit_creds(bprm->cred);
1403 	bprm->cred = NULL;
1404 
1405 	/*
1406 	 * Disable monitoring for regular users
1407 	 * when executing setuid binaries. Must
1408 	 * wait until new credentials are committed
1409 	 * by commit_creds() above
1410 	 */
1411 	if (get_dumpable(current->mm) != SUID_DUMP_USER)
1412 		perf_event_exit_task(current);
1413 	/*
1414 	 * cred_guard_mutex must be held at least to this point to prevent
1415 	 * ptrace_attach() from altering our determination of the task's
1416 	 * credentials; any time after this it may be unlocked.
1417 	 */
1418 	security_bprm_committed_creds(bprm);
1419 	mutex_unlock(&current->signal->cred_guard_mutex);
1420 }
1421 EXPORT_SYMBOL(install_exec_creds);
1422 
1423 /*
1424  * determine how safe it is to execute the proposed program
1425  * - the caller must hold ->cred_guard_mutex to protect against
1426  *   PTRACE_ATTACH or seccomp thread-sync
1427  */
1428 static void check_unsafe_exec(struct linux_binprm *bprm)
1429 {
1430 	struct task_struct *p = current, *t;
1431 	unsigned n_fs;
1432 
1433 	if (p->ptrace)
1434 		bprm->unsafe |= LSM_UNSAFE_PTRACE;
1435 
1436 	/*
1437 	 * This isn't strictly necessary, but it makes it harder for LSMs to
1438 	 * mess up.
1439 	 */
1440 	if (task_no_new_privs(current))
1441 		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1442 
1443 	t = p;
1444 	n_fs = 1;
1445 	spin_lock(&p->fs->lock);
1446 	rcu_read_lock();
1447 	while_each_thread(p, t) {
1448 		if (t->fs == p->fs)
1449 			n_fs++;
1450 	}
1451 	rcu_read_unlock();
1452 
1453 	if (p->fs->users > n_fs)
1454 		bprm->unsafe |= LSM_UNSAFE_SHARE;
1455 	else
1456 		p->fs->in_exec = 1;
1457 	spin_unlock(&p->fs->lock);
1458 }
1459 
1460 static void bprm_fill_uid(struct linux_binprm *bprm)
1461 {
1462 	struct inode *inode;
1463 	unsigned int mode;
1464 	kuid_t uid;
1465 	kgid_t gid;
1466 
1467 	/*
1468 	 * Since this can be called multiple times (via prepare_binprm),
1469 	 * we must clear any previous work done when setting set[ug]id
1470 	 * bits from any earlier bprm->file uses (for example when run
1471 	 * first for a setuid script then again for its interpreter).
1472 	 */
1473 	bprm->cred->euid = current_euid();
1474 	bprm->cred->egid = current_egid();
1475 
1476 	if (!mnt_may_suid(bprm->file->f_path.mnt))
1477 		return;
1478 
1479 	if (task_no_new_privs(current))
1480 		return;
1481 
1482 	inode = bprm->file->f_path.dentry->d_inode;
1483 	mode = READ_ONCE(inode->i_mode);
1484 	if (!(mode & (S_ISUID|S_ISGID)))
1485 		return;
1486 
1487 	/* Be careful if suid/sgid is set */
1488 	inode_lock(inode);
1489 
1490 	/* Reload mode/uid/gid atomically now that the lock is held */
1491 	mode = inode->i_mode;
1492 	uid = inode->i_uid;
1493 	gid = inode->i_gid;
1494 	inode_unlock(inode);
1495 
1496 	/* We ignore suid/sgid if there are no mappings for them in the ns */
1497 	if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1498 		 !kgid_has_mapping(bprm->cred->user_ns, gid))
1499 		return;
1500 
1501 	if (mode & S_ISUID) {
1502 		bprm->per_clear |= PER_CLEAR_ON_SETID;
1503 		bprm->cred->euid = uid;
1504 	}
1505 
1506 	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1507 		bprm->per_clear |= PER_CLEAR_ON_SETID;
1508 		bprm->cred->egid = gid;
1509 	}
1510 }
1511 
1512 /*
1513  * Fill the binprm structure from the inode.
1514  * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
1515  *
1516  * This may be called multiple times for binary chains (scripts for example).
1517  */
1518 int prepare_binprm(struct linux_binprm *bprm)
1519 {
1520 	int retval;
1521 
1522 	bprm_fill_uid(bprm);
1523 
1524 	/* fill in binprm security blob */
1525 	retval = security_bprm_set_creds(bprm);
1526 	if (retval)
1527 		return retval;
1528 	bprm->cred_prepared = 1;
1529 
1530 	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1531 	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
1532 }
1533 
1534 EXPORT_SYMBOL(prepare_binprm);
1535 
1536 /*
1537  * Arguments are '\0' separated strings found at the location bprm->p
1538  * points to; chop off the first by relocating bprm->p to right after
1539  * the first '\0' encountered.
1540  */
1541 int remove_arg_zero(struct linux_binprm *bprm)
1542 {
1543 	int ret = 0;
1544 	unsigned long offset;
1545 	char *kaddr;
1546 	struct page *page;
1547 
1548 	if (!bprm->argc)
1549 		return 0;
1550 
1551 	do {
1552 		offset = bprm->p & ~PAGE_MASK;
1553 		page = get_arg_page(bprm, bprm->p, 0);
1554 		if (!page) {
1555 			ret = -EFAULT;
1556 			goto out;
1557 		}
1558 		kaddr = kmap_atomic(page);
1559 
1560 		for (; offset < PAGE_SIZE && kaddr[offset];
1561 				offset++, bprm->p++)
1562 			;
1563 
1564 		kunmap_atomic(kaddr);
1565 		put_arg_page(page);
1566 	} while (offset == PAGE_SIZE);
1567 
1568 	bprm->p++;
1569 	bprm->argc--;
1570 	ret = 0;
1571 
1572 out:
1573 	return ret;
1574 }
1575 EXPORT_SYMBOL(remove_arg_zero);
1576 
1577 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1578 /*
1579  * Cycle through the list of binary format handlers until one recognizes the image.
1580  */
1581 int search_binary_handler(struct linux_binprm *bprm)
1582 {
1583 	bool need_retry = IS_ENABLED(CONFIG_MODULES);
1584 	struct linux_binfmt *fmt;
1585 	int retval;
1586 
1587 	/* This allows 4 levels of binfmt rewrites before failing hard. */
1588 	if (bprm->recursion_depth > 5)
1589 		return -ELOOP;
1590 
1591 	retval = security_bprm_check(bprm);
1592 	if (retval)
1593 		return retval;
1594 
1595 	retval = -ENOENT;
1596  retry:
1597 	read_lock(&binfmt_lock);
1598 	list_for_each_entry(fmt, &formats, lh) {
1599 		if (!try_module_get(fmt->module))
1600 			continue;
1601 		read_unlock(&binfmt_lock);
1602 		bprm->recursion_depth++;
1603 		retval = fmt->load_binary(bprm);
1604 		read_lock(&binfmt_lock);
1605 		put_binfmt(fmt);
1606 		bprm->recursion_depth--;
1607 		if (retval < 0 && !bprm->mm) {
1608 			/* we got to flush_old_exec() and failed after it */
1609 			read_unlock(&binfmt_lock);
1610 			force_sigsegv(SIGSEGV, current);
1611 			return retval;
1612 		}
1613 		if (retval != -ENOEXEC || !bprm->file) {
1614 			read_unlock(&binfmt_lock);
1615 			return retval;
1616 		}
1617 	}
1618 	read_unlock(&binfmt_lock);
1619 
1620 	if (need_retry) {
1621 		if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1622 		    printable(bprm->buf[2]) && printable(bprm->buf[3]))
1623 			return retval;
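		/*
		 * The first four bytes are not all printable ASCII, so as a
		 * last resort ask for a "binfmt-XXXX" module named after the
		 * 16-bit value at offset 2 of the header, then rescan the
		 * handler list once.
		 */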
1624 		if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1625 			return retval;
1626 		need_retry = false;
1627 		goto retry;
1628 	}
1629 
1630 	return retval;
1631 }
1632 EXPORT_SYMBOL(search_binary_handler);
1633 
1634 static int exec_binprm(struct linux_binprm *bprm)
1635 {
1636 	pid_t old_pid, old_vpid;
1637 	int ret;
1638 
1639 	/* Need to fetch pid before load_binary changes it */
1640 	old_pid = current->pid;
1641 	rcu_read_lock();
1642 	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1643 	rcu_read_unlock();
1644 
1645 	ret = search_binary_handler(bprm);
1646 	if (ret >= 0) {
1647 		audit_bprm(bprm);
1648 		trace_sched_process_exec(current, old_pid, bprm);
1649 		ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1650 		proc_exec_connector(current);
1651 	}
1652 
1653 	return ret;
1654 }
1655 
1656 /*
1657  * sys_execve() executes a new program.
1658  */
1659 static int do_execveat_common(int fd, struct filename *filename,
1660 			      struct user_arg_ptr argv,
1661 			      struct user_arg_ptr envp,
1662 			      int flags)
1663 {
1664 	char *pathbuf = NULL;
1665 	struct linux_binprm *bprm;
1666 	struct file *file;
1667 	struct files_struct *displaced;
1668 	int retval;
1669 
1670 	if (IS_ERR(filename))
1671 		return PTR_ERR(filename);
1672 
1673 	/*
1674 	 * We move the actual failure in case of RLIMIT_NPROC excess from
1675 	 * set*uid() to execve() because too many poorly written programs
1676 	 * don't check setuid() return code.  Here we additionally recheck
1677 	 * whether NPROC limit is still exceeded.
1678 	 */
1679 	if ((current->flags & PF_NPROC_EXCEEDED) &&
1680 	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
1681 		retval = -EAGAIN;
1682 		goto out_ret;
1683 	}
1684 
1685 	/* We're below the limit (still or again), so we don't want to make
1686 	 * further execve() calls fail. */
1687 	current->flags &= ~PF_NPROC_EXCEEDED;
1688 
1689 	retval = unshare_files(&displaced);
1690 	if (retval)
1691 		goto out_ret;
1692 
1693 	retval = -ENOMEM;
1694 	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1695 	if (!bprm)
1696 		goto out_files;
1697 
1698 	retval = prepare_bprm_creds(bprm);
1699 	if (retval)
1700 		goto out_free;
1701 
1702 	check_unsafe_exec(bprm);
1703 	current->in_execve = 1;
1704 
1705 	file = do_open_execat(fd, filename, flags);
1706 	retval = PTR_ERR(file);
1707 	if (IS_ERR(file))
1708 		goto out_unmark;
1709 
1710 	sched_exec();
1711 
1712 	bprm->file = file;
1713 	if (fd == AT_FDCWD || filename->name[0] == '/') {
1714 		bprm->filename = filename->name;
1715 	} else {
1716 		if (filename->name[0] == '\0')
1717 			pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d", fd);
1718 		else
1719 			pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d/%s",
1720 					    fd, filename->name);
1721 		if (!pathbuf) {
1722 			retval = -ENOMEM;
1723 			goto out_unmark;
1724 		}
1725 		/*
1726 		 * Record that a name derived from an O_CLOEXEC fd will be
1727 		 * inaccessible after exec. Relies on having exclusive access to
1728 		 * current->files (due to unshare_files above).
1729 		 */
1730 		if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
1731 			bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1732 		bprm->filename = pathbuf;
1733 	}
1734 	bprm->interp = bprm->filename;
1735 
1736 	retval = bprm_mm_init(bprm);
1737 	if (retval)
1738 		goto out_unmark;
1739 
1740 	bprm->argc = count(argv, MAX_ARG_STRINGS);
1741 	if ((retval = bprm->argc) < 0)
1742 		goto out;
1743 
1744 	bprm->envc = count(envp, MAX_ARG_STRINGS);
1745 	if ((retval = bprm->envc) < 0)
1746 		goto out;
1747 
1748 	retval = prepare_binprm(bprm);
1749 	if (retval < 0)
1750 		goto out;
1751 
1752 	retval = copy_strings_kernel(1, &bprm->filename, bprm);
1753 	if (retval < 0)
1754 		goto out;
1755 
1756 	bprm->exec = bprm->p;
1757 	retval = copy_strings(bprm->envc, envp, bprm);
1758 	if (retval < 0)
1759 		goto out;
1760 
1761 	retval = copy_strings(bprm->argc, argv, bprm);
1762 	if (retval < 0)
1763 		goto out;
1764 
1765 	would_dump(bprm, bprm->file);
1766 
1767 	retval = exec_binprm(bprm);
1768 	if (retval < 0)
1769 		goto out;
1770 
1771 	/* execve succeeded */
1772 	current->fs->in_exec = 0;
1773 	current->in_execve = 0;
1774 	acct_update_integrals(current);
1775 	task_numa_free(current);
1776 	free_bprm(bprm);
1777 	kfree(pathbuf);
1778 	putname(filename);
1779 	if (displaced)
1780 		put_files_struct(displaced);
1781 	return retval;
1782 
1783 out:
1784 	if (bprm->mm) {
1785 		acct_arg_size(bprm, 0);
1786 		mmput(bprm->mm);
1787 	}
1788 
1789 out_unmark:
1790 	current->fs->in_exec = 0;
1791 	current->in_execve = 0;
1792 
1793 out_free:
1794 	free_bprm(bprm);
1795 	kfree(pathbuf);
1796 
1797 out_files:
1798 	if (displaced)
1799 		reset_files_struct(displaced);
1800 out_ret:
1801 	putname(filename);
1802 	return retval;
1803 }
1804 
1805 int do_execve(struct filename *filename,
1806 	const char __user *const __user *__argv,
1807 	const char __user *const __user *__envp)
1808 {
1809 	struct user_arg_ptr argv = { .ptr.native = __argv };
1810 	struct user_arg_ptr envp = { .ptr.native = __envp };
1811 	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1812 }
1813 
1814 int do_execveat(int fd, struct filename *filename,
1815 		const char __user *const __user *__argv,
1816 		const char __user *const __user *__envp,
1817 		int flags)
1818 {
1819 	struct user_arg_ptr argv = { .ptr.native = __argv };
1820 	struct user_arg_ptr envp = { .ptr.native = __envp };
1821 
1822 	return do_execveat_common(fd, filename, argv, envp, flags);
1823 }
1824 
1825 #ifdef CONFIG_COMPAT
1826 static int compat_do_execve(struct filename *filename,
1827 	const compat_uptr_t __user *__argv,
1828 	const compat_uptr_t __user *__envp)
1829 {
1830 	struct user_arg_ptr argv = {
1831 		.is_compat = true,
1832 		.ptr.compat = __argv,
1833 	};
1834 	struct user_arg_ptr envp = {
1835 		.is_compat = true,
1836 		.ptr.compat = __envp,
1837 	};
1838 	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1839 }
1840 
1841 static int compat_do_execveat(int fd, struct filename *filename,
1842 			      const compat_uptr_t __user *__argv,
1843 			      const compat_uptr_t __user *__envp,
1844 			      int flags)
1845 {
1846 	struct user_arg_ptr argv = {
1847 		.is_compat = true,
1848 		.ptr.compat = __argv,
1849 	};
1850 	struct user_arg_ptr envp = {
1851 		.is_compat = true,
1852 		.ptr.compat = __envp,
1853 	};
1854 	return do_execveat_common(fd, filename, argv, envp, flags);
1855 }
1856 #endif
1857 
1858 void set_binfmt(struct linux_binfmt *new)
1859 {
1860 	struct mm_struct *mm = current->mm;
1861 
1862 	if (mm->binfmt)
1863 		module_put(mm->binfmt->module);
1864 
1865 	mm->binfmt = new;
1866 	if (new)
1867 		__module_get(new->module);
1868 }
1869 EXPORT_SYMBOL(set_binfmt);
1870 
1871 /*
1872  * set_dumpable stores three-value SUID_DUMP_* into mm->flags.
1873  */
1874 void set_dumpable(struct mm_struct *mm, int value)
1875 {
1876 	unsigned long old, new;
1877 
1878 	if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
1879 		return;
1880 
1881 	do {
1882 		old = ACCESS_ONCE(mm->flags);
1883 		new = (old & ~MMF_DUMPABLE_MASK) | value;
1884 	} while (cmpxchg(&mm->flags, old, new) != old);
1885 }
1886 
1887 SYSCALL_DEFINE3(execve,
1888 		const char __user *, filename,
1889 		const char __user *const __user *, argv,
1890 		const char __user *const __user *, envp)
1891 {
1892 	return do_execve(getname(filename), argv, envp);
1893 }
1894 
1895 SYSCALL_DEFINE5(execveat,
1896 		int, fd, const char __user *, filename,
1897 		const char __user *const __user *, argv,
1898 		const char __user *const __user *, envp,
1899 		int, flags)
1900 {
1901 	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1902 
1903 	return do_execveat(fd,
1904 			   getname_flags(filename, lookup_flags, NULL),
1905 			   argv, envp, flags);
1906 }
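
/*
 * Userspace sketch (not kernel code, path made up): with AT_EMPTY_PATH and
 * an empty pathname, execveat() executes the program referred to by fd
 * itself; this is how glibc implements fexecve(3):
 *
 *	int fd = open("/usr/bin/example", O_PATH | O_CLOEXEC);
 *	execveat(fd, "", argv, envp, AT_EMPTY_PATH);
 */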
1907 
1908 #ifdef CONFIG_COMPAT
1909 COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
1910 	const compat_uptr_t __user *, argv,
1911 	const compat_uptr_t __user *, envp)
1912 {
1913 	return compat_do_execve(getname(filename), argv, envp);
1914 }
1915 
1916 COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
1917 		       const char __user *, filename,
1918 		       const compat_uptr_t __user *, argv,
1919 		       const compat_uptr_t __user *, envp,
1920 		       int,  flags)
1921 {
1922 	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1923 
1924 	return compat_do_execveat(fd,
1925 				  getname_flags(filename, lookup_flags, NULL),
1926 				  argv, envp, flags);
1927 }
1928 #endif
1929