xref: /openbmc/linux/fs/exec.c (revision 3864601387cf4196371e3c1897fdffa5228296f9)
1 /*
2  *  linux/fs/exec.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 /*
8  * #!-checking implemented by tytso.
9  */
10 /*
11  * Demand-loading implemented 01.12.91 - no need to read anything but
12  * the header into memory. The inode of the executable is put into
13  * "current->executable", and page faults do the actual loading. Clean.
14  *
15  * Once more I can proudly say that linux stood up to being changed: it
16  * was less than 2 hours work to get demand-loading completely implemented.
17  *
18  * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
19  * current->executable is only used by the procfs.  This allows a dispatch
20  * table to check for several different types of binary formats.  We keep
21  * trying until we recognize the file or we run out of supported binary
22  * formats.
23  */
24 
25 #include <linux/slab.h>
26 #include <linux/file.h>
27 #include <linux/fdtable.h>
28 #include <linux/mm.h>
29 #include <linux/stat.h>
30 #include <linux/fcntl.h>
31 #include <linux/swap.h>
32 #include <linux/string.h>
33 #include <linux/init.h>
34 #include <linux/pagemap.h>
35 #include <linux/perf_event.h>
36 #include <linux/highmem.h>
37 #include <linux/spinlock.h>
38 #include <linux/key.h>
39 #include <linux/personality.h>
40 #include <linux/binfmts.h>
41 #include <linux/utsname.h>
42 #include <linux/pid_namespace.h>
43 #include <linux/module.h>
44 #include <linux/namei.h>
45 #include <linux/mount.h>
46 #include <linux/security.h>
47 #include <linux/syscalls.h>
48 #include <linux/tsacct_kern.h>
49 #include <linux/cn_proc.h>
50 #include <linux/audit.h>
51 #include <linux/tracehook.h>
52 #include <linux/kmod.h>
53 #include <linux/fsnotify.h>
54 #include <linux/fs_struct.h>
55 #include <linux/pipe_fs_i.h>
56 #include <linux/oom.h>
57 #include <linux/compat.h>
58 
59 #include <asm/uaccess.h>
60 #include <asm/mmu_context.h>
61 #include <asm/tlb.h>
62 #include "internal.h"
63 
64 int core_uses_pid;
65 char core_pattern[CORENAME_MAX_SIZE] = "core";
66 unsigned int core_pipe_limit;
67 int suid_dumpable = 0;
68 
69 struct core_name {
70 	char *corename;
71 	int used, size;
72 };
73 static atomic_t call_count = ATOMIC_INIT(1);
74 
75 /* The maximal length of core_pattern is also specified in sysctl.c */
76 
77 static LIST_HEAD(formats);
78 static DEFINE_RWLOCK(binfmt_lock);
79 
80 int __register_binfmt(struct linux_binfmt *fmt, int insert)
81 {
82 	if (!fmt)
83 		return -EINVAL;
84 	write_lock(&binfmt_lock);
85 	insert ? list_add(&fmt->lh, &formats) :
86 		 list_add_tail(&fmt->lh, &formats);
87 	write_unlock(&binfmt_lock);
88 	return 0;
89 }
90 
91 EXPORT_SYMBOL(__register_binfmt);
92 
93 void unregister_binfmt(struct linux_binfmt *fmt)
94 {
95 	write_lock(&binfmt_lock);
96 	list_del(&fmt->lh);
97 	write_unlock(&binfmt_lock);
98 }
99 
100 EXPORT_SYMBOL(unregister_binfmt);
101 
102 static inline void put_binfmt(struct linux_binfmt *fmt)
103 {
104 	module_put(fmt->module);
105 }
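
/*
 * Illustrative sketch (not compiled into this file, hence the #if 0):
 * how a hypothetical out-of-tree handler would use the registration
 * helpers above.  register_binfmt()/insert_binfmt() from
 * <linux/binfmts.h> wrap __register_binfmt(); the load_binary callback
 * below simply rejects everything with -ENOEXEC so that
 * search_binary_handler() moves on to the next format.  All "example_"
 * names are made up for illustration.
 */
#if 0
static int example_load_binary(struct linux_binprm *bprm,
			       struct pt_regs *regs)
{
	/* A real handler would parse the header in bprm->buf here. */
	return -ENOEXEC;
}

static struct linux_binfmt example_format = {
	.module      = THIS_MODULE,
	.load_binary = example_load_binary,
};

static int __init example_init(void)
{
	return register_binfmt(&example_format);	/* tail of the list */
}

static void __exit example_exit(void)
{
	unregister_binfmt(&example_format);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif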
106 
107 /*
108  * Note that a shared library must be both readable and executable for
109  * security reasons.
110  *
111  * Also note that we take the address to load from the file itself.
112  */
113 SYSCALL_DEFINE1(uselib, const char __user *, library)
114 {
115 	struct file *file;
116 	char *tmp = getname(library);
117 	int error = PTR_ERR(tmp);
118 	static const struct open_flags uselib_flags = {
119 		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
120 		.acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
121 		.intent = LOOKUP_OPEN
122 	};
123 
124 	if (IS_ERR(tmp))
125 		goto out;
126 
127 	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags, LOOKUP_FOLLOW);
128 	putname(tmp);
129 	error = PTR_ERR(file);
130 	if (IS_ERR(file))
131 		goto out;
132 
133 	error = -EINVAL;
134 	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
135 		goto exit;
136 
137 	error = -EACCES;
138 	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
139 		goto exit;
140 
141 	fsnotify_open(file);
142 
143 	error = -ENOEXEC;
144 	if (file->f_op) {
145 		struct linux_binfmt *fmt;
146 
147 		read_lock(&binfmt_lock);
148 		list_for_each_entry(fmt, &formats, lh) {
149 			if (!fmt->load_shlib)
150 				continue;
151 			if (!try_module_get(fmt->module))
152 				continue;
153 			read_unlock(&binfmt_lock);
154 			error = fmt->load_shlib(file);
155 			read_lock(&binfmt_lock);
156 			put_binfmt(fmt);
157 			if (error != -ENOEXEC)
158 				break;
159 		}
160 		read_unlock(&binfmt_lock);
161 	}
162 exit:
163 	fput(file);
164 out:
165 	return error;
166 }
167 
168 #ifdef CONFIG_MMU
169 /*
170  * The nascent bprm->mm is not visible until exec_mmap() but it can
171  * use a lot of memory; account these pages in current->mm temporarily
172  * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
173  * change the counter back via acct_arg_size(0).
174  */
175 static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
176 {
177 	struct mm_struct *mm = current->mm;
178 	long diff = (long)(pages - bprm->vma_pages);
179 
180 	if (!mm || !diff)
181 		return;
182 
183 	bprm->vma_pages = pages;
184 
185 #ifdef SPLIT_RSS_COUNTING
186 	add_mm_counter(mm, MM_ANONPAGES, diff);
187 #else
188 	spin_lock(&mm->page_table_lock);
189 	add_mm_counter(mm, MM_ANONPAGES, diff);
190 	spin_unlock(&mm->page_table_lock);
191 #endif
192 }
193 
194 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
195 		int write)
196 {
197 	struct page *page;
198 	int ret;
199 
200 #ifdef CONFIG_STACK_GROWSUP
201 	if (write) {
202 		ret = expand_downwards(bprm->vma, pos);
203 		if (ret < 0)
204 			return NULL;
205 	}
206 #endif
207 	ret = get_user_pages(current, bprm->mm, pos,
208 			1, write, 1, &page, NULL);
209 	if (ret <= 0)
210 		return NULL;
211 
212 	if (write) {
213 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
214 		struct rlimit *rlim;
215 
216 		acct_arg_size(bprm, size / PAGE_SIZE);
217 
218 		/*
219 		 * We've historically supported up to 32 pages (ARG_MAX)
220 		 * of argument strings even with small stacks
221 		 */
222 		if (size <= ARG_MAX)
223 			return page;
224 
225 		/*
226 		 * Limit to 1/4-th the stack size for the argv+env strings.
227 		 * This ensures that:
228 		 *  - the remaining binfmt code will not run out of stack space,
229 		 *  - the program will have a reasonable amount of stack left
230 		 *    to work from.
231 		 */
232 		rlim = current->signal->rlim;
233 		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
234 			put_page(page);
235 			return NULL;
236 		}
237 	}
238 
239 	return page;
240 }
241 
242 static void put_arg_page(struct page *page)
243 {
244 	put_page(page);
245 }
246 
247 static void free_arg_page(struct linux_binprm *bprm, int i)
248 {
249 }
250 
251 static void free_arg_pages(struct linux_binprm *bprm)
252 {
253 }
254 
255 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
256 		struct page *page)
257 {
258 	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
259 }
260 
261 static int __bprm_mm_init(struct linux_binprm *bprm)
262 {
263 	int err;
264 	struct vm_area_struct *vma = NULL;
265 	struct mm_struct *mm = bprm->mm;
266 
267 	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
268 	if (!vma)
269 		return -ENOMEM;
270 
271 	down_write(&mm->mmap_sem);
272 	vma->vm_mm = mm;
273 
274 	/*
275 	 * Place the stack at the largest stack address the architecture
276 	 * supports. Later, we'll move this to an appropriate place. We don't
277 	 * use STACK_TOP because that can depend on attributes which aren't
278 	 * configured yet.
279 	 */
280 	BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
281 	vma->vm_end = STACK_TOP_MAX;
282 	vma->vm_start = vma->vm_end - PAGE_SIZE;
283 	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
284 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
285 	INIT_LIST_HEAD(&vma->anon_vma_chain);
286 
287 	err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
288 	if (err)
289 		goto err;
290 
291 	err = insert_vm_struct(mm, vma);
292 	if (err)
293 		goto err;
294 
295 	mm->stack_vm = mm->total_vm = 1;
296 	up_write(&mm->mmap_sem);
297 	bprm->p = vma->vm_end - sizeof(void *);
298 	return 0;
299 err:
300 	up_write(&mm->mmap_sem);
301 	bprm->vma = NULL;
302 	kmem_cache_free(vm_area_cachep, vma);
303 	return err;
304 }
305 
306 static bool valid_arg_len(struct linux_binprm *bprm, long len)
307 {
308 	return len <= MAX_ARG_STRLEN;
309 }
310 
311 #else
312 
313 static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
314 {
315 }
316 
317 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
318 		int write)
319 {
320 	struct page *page;
321 
322 	page = bprm->page[pos / PAGE_SIZE];
323 	if (!page && write) {
324 		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
325 		if (!page)
326 			return NULL;
327 		bprm->page[pos / PAGE_SIZE] = page;
328 	}
329 
330 	return page;
331 }
332 
333 static void put_arg_page(struct page *page)
334 {
335 }
336 
337 static void free_arg_page(struct linux_binprm *bprm, int i)
338 {
339 	if (bprm->page[i]) {
340 		__free_page(bprm->page[i]);
341 		bprm->page[i] = NULL;
342 	}
343 }
344 
345 static void free_arg_pages(struct linux_binprm *bprm)
346 {
347 	int i;
348 
349 	for (i = 0; i < MAX_ARG_PAGES; i++)
350 		free_arg_page(bprm, i);
351 }
352 
353 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
354 		struct page *page)
355 {
356 }
357 
358 static int __bprm_mm_init(struct linux_binprm *bprm)
359 {
360 	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
361 	return 0;
362 }
363 
364 static bool valid_arg_len(struct linux_binprm *bprm, long len)
365 {
366 	return len <= bprm->p;
367 }
368 
369 #endif /* CONFIG_MMU */
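
/*
 * Userspace illustration of the argument-size checks above (a separate
 * program, not kernel code; kept under #if 0 so this file still builds).
 * On MMU kernels a single string longer than MAX_ARG_STRLEN (32 pages)
 * makes execve(2) fail with E2BIG.  Assumes 4K pages.
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	size_t len = 32 * 4096 + 1;	/* one byte past MAX_ARG_STRLEN */
	char *big = malloc(len);
	char *argv[] = { "/bin/true", NULL, NULL };

	if (!big)
		return 1;
	memset(big, 'x', len - 1);
	big[len - 1] = '\0';
	argv[1] = big;

	execv("/bin/true", argv);
	/* Only reached on failure; E2BIG is expected here. */
	printf("execv failed: %s\n", strerror(errno));
	return 0;
}
#endif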
370 
371 /*
372  * Create a new mm_struct and populate it with a temporary stack
373  * vm_area_struct.  We don't have enough context at this point to set the stack
374  * flags, permissions, and offset, so we use temporary values.  We'll update
375  * them later in setup_arg_pages().
376  */
377 int bprm_mm_init(struct linux_binprm *bprm)
378 {
379 	int err;
380 	struct mm_struct *mm = NULL;
381 
382 	bprm->mm = mm = mm_alloc();
383 	err = -ENOMEM;
384 	if (!mm)
385 		goto err;
386 
387 	err = init_new_context(current, mm);
388 	if (err)
389 		goto err;
390 
391 	err = __bprm_mm_init(bprm);
392 	if (err)
393 		goto err;
394 
395 	return 0;
396 
397 err:
398 	if (mm) {
399 		bprm->mm = NULL;
400 		mmdrop(mm);
401 	}
402 
403 	return err;
404 }
405 
406 struct user_arg_ptr {
407 #ifdef CONFIG_COMPAT
408 	bool is_compat;
409 #endif
410 	union {
411 		const char __user *const __user *native;
412 #ifdef CONFIG_COMPAT
413 		compat_uptr_t __user *compat;
414 #endif
415 	} ptr;
416 };
417 
418 static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
419 {
420 	const char __user *native;
421 
422 #ifdef CONFIG_COMPAT
423 	if (unlikely(argv.is_compat)) {
424 		compat_uptr_t compat;
425 
426 		if (get_user(compat, argv.ptr.compat + nr))
427 			return ERR_PTR(-EFAULT);
428 
429 		return compat_ptr(compat);
430 	}
431 #endif
432 
433 	if (get_user(native, argv.ptr.native + nr))
434 		return ERR_PTR(-EFAULT);
435 
436 	return native;
437 }
438 
439 /*
440  * count() counts the number of strings in array ARGV.
441  */
442 static int count(struct user_arg_ptr argv, int max)
443 {
444 	int i = 0;
445 
446 	if (argv.ptr.native != NULL) {
447 		for (;;) {
448 			const char __user *p = get_user_arg_ptr(argv, i);
449 
450 			if (!p)
451 				break;
452 
453 			if (IS_ERR(p))
454 				return -EFAULT;
455 
456 			if (i++ >= max)
457 				return -E2BIG;
458 
459 			if (fatal_signal_pending(current))
460 				return -ERESTARTNOHAND;
461 			cond_resched();
462 		}
463 	}
464 	return i;
465 }
466 
467 /*
468  * 'copy_strings()' copies argument/environment strings from the old
469  * process's memory to the new process's stack.  The call to get_user_pages()
470  * ensures the destination page is created and not swapped out.
471  */
472 static int copy_strings(int argc, struct user_arg_ptr argv,
473 			struct linux_binprm *bprm)
474 {
475 	struct page *kmapped_page = NULL;
476 	char *kaddr = NULL;
477 	unsigned long kpos = 0;
478 	int ret;
479 
480 	while (argc-- > 0) {
481 		const char __user *str;
482 		int len;
483 		unsigned long pos;
484 
485 		ret = -EFAULT;
486 		str = get_user_arg_ptr(argv, argc);
487 		if (IS_ERR(str))
488 			goto out;
489 
490 		len = strnlen_user(str, MAX_ARG_STRLEN);
491 		if (!len)
492 			goto out;
493 
494 		ret = -E2BIG;
495 		if (!valid_arg_len(bprm, len))
496 			goto out;
497 
498 		/* We're going to work our way backwards. */
499 		pos = bprm->p;
500 		str += len;
501 		bprm->p -= len;
502 
503 		while (len > 0) {
504 			int offset, bytes_to_copy;
505 
506 			if (fatal_signal_pending(current)) {
507 				ret = -ERESTARTNOHAND;
508 				goto out;
509 			}
510 			cond_resched();
511 
512 			offset = pos % PAGE_SIZE;
513 			if (offset == 0)
514 				offset = PAGE_SIZE;
515 
516 			bytes_to_copy = offset;
517 			if (bytes_to_copy > len)
518 				bytes_to_copy = len;
519 
520 			offset -= bytes_to_copy;
521 			pos -= bytes_to_copy;
522 			str -= bytes_to_copy;
523 			len -= bytes_to_copy;
524 
525 			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
526 				struct page *page;
527 
528 				page = get_arg_page(bprm, pos, 1);
529 				if (!page) {
530 					ret = -E2BIG;
531 					goto out;
532 				}
533 
534 				if (kmapped_page) {
535 					flush_kernel_dcache_page(kmapped_page);
536 					kunmap(kmapped_page);
537 					put_arg_page(kmapped_page);
538 				}
539 				kmapped_page = page;
540 				kaddr = kmap(kmapped_page);
541 				kpos = pos & PAGE_MASK;
542 				flush_arg_page(bprm, kpos, kmapped_page);
543 			}
544 			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
545 				ret = -EFAULT;
546 				goto out;
547 			}
548 		}
549 	}
550 	ret = 0;
551 out:
552 	if (kmapped_page) {
553 		flush_kernel_dcache_page(kmapped_page);
554 		kunmap(kmapped_page);
555 		put_arg_page(kmapped_page);
556 	}
557 	return ret;
558 }
559 
560 /*
561  * Like copy_strings, but get argv and its values from kernel memory.
562  */
563 int copy_strings_kernel(int argc, const char *const *__argv,
564 			struct linux_binprm *bprm)
565 {
566 	int r;
567 	mm_segment_t oldfs = get_fs();
568 	struct user_arg_ptr argv = {
569 		.ptr.native = (const char __user *const __user *)__argv,
570 	};
571 
572 	set_fs(KERNEL_DS);
573 	r = copy_strings(argc, argv, bprm);
574 	set_fs(oldfs);
575 
576 	return r;
577 }
578 EXPORT_SYMBOL(copy_strings_kernel);
579 
580 #ifdef CONFIG_MMU
581 
582 /*
583  * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
584  * the binfmt code determines where the new stack should reside, we shift it to
585  * its final location.  The process proceeds as follows:
586  *
587  * 1) Use shift to calculate the new vma endpoints.
588  * 2) Extend vma to cover both the old and new ranges.  This ensures the
589  *    arguments passed to subsequent functions are consistent.
590  * 3) Move vma's page tables to the new range.
591  * 4) Free up any cleared pgd range.
592  * 5) Shrink the vma to cover only the new range.
593  */
594 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
595 {
596 	struct mm_struct *mm = vma->vm_mm;
597 	unsigned long old_start = vma->vm_start;
598 	unsigned long old_end = vma->vm_end;
599 	unsigned long length = old_end - old_start;
600 	unsigned long new_start = old_start - shift;
601 	unsigned long new_end = old_end - shift;
602 	struct mmu_gather tlb;
603 
604 	BUG_ON(new_start > new_end);
605 
606 	/*
607 	 * ensure there are no vmas between where we want to go
608 	 * and where we are
609 	 */
610 	if (vma != find_vma(mm, new_start))
611 		return -EFAULT;
612 
613 	/*
614 	 * cover the whole range: [new_start, old_end)
615 	 */
616 	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
617 		return -ENOMEM;
618 
619 	/*
620 	 * move the page tables downwards, on failure we rely on
621 	 * process cleanup to remove whatever mess we made.
622 	 */
623 	if (length != move_page_tables(vma, old_start,
624 				       vma, new_start, length))
625 		return -ENOMEM;
626 
627 	lru_add_drain();
628 	tlb_gather_mmu(&tlb, mm, 0);
629 	if (new_end > old_start) {
630 		/*
631 		 * when the old and new regions overlap clear from new_end.
632 		 */
633 		free_pgd_range(&tlb, new_end, old_end, new_end,
634 			vma->vm_next ? vma->vm_next->vm_start : 0);
635 	} else {
636 		/*
637 		 * otherwise, clean from old_start; this is done to avoid touching
638 		 * the address space in [new_end, old_start).  Some architectures
639 		 * have constraints on va-space that make this illegal (IA64);
640 		 * for the others it's just a little faster.
641 		 */
642 		free_pgd_range(&tlb, old_start, old_end, new_end,
643 			vma->vm_next ? vma->vm_next->vm_start : 0);
644 	}
645 	tlb_finish_mmu(&tlb, new_end, old_end);
646 
647 	/*
648 	 * Shrink the vma to just the new range.  Always succeeds.
649 	 */
650 	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
651 
652 	return 0;
653 }
654 
655 /*
656  * Finalizes the stack vm_area_struct. The flags and permissions are updated,
657  * the stack is optionally relocated, and some extra space is added.
658  */
659 int setup_arg_pages(struct linux_binprm *bprm,
660 		    unsigned long stack_top,
661 		    int executable_stack)
662 {
663 	unsigned long ret;
664 	unsigned long stack_shift;
665 	struct mm_struct *mm = current->mm;
666 	struct vm_area_struct *vma = bprm->vma;
667 	struct vm_area_struct *prev = NULL;
668 	unsigned long vm_flags;
669 	unsigned long stack_base;
670 	unsigned long stack_size;
671 	unsigned long stack_expand;
672 	unsigned long rlim_stack;
673 
674 #ifdef CONFIG_STACK_GROWSUP
675 	/* Limit stack size to 1GB */
676 	stack_base = rlimit_max(RLIMIT_STACK);
677 	if (stack_base > (1 << 30))
678 		stack_base = 1 << 30;
679 
680 	/* Make sure we didn't let the argument array grow too large. */
681 	if (vma->vm_end - vma->vm_start > stack_base)
682 		return -ENOMEM;
683 
684 	stack_base = PAGE_ALIGN(stack_top - stack_base);
685 
686 	stack_shift = vma->vm_start - stack_base;
687 	mm->arg_start = bprm->p - stack_shift;
688 	bprm->p = vma->vm_end - stack_shift;
689 #else
690 	stack_top = arch_align_stack(stack_top);
691 	stack_top = PAGE_ALIGN(stack_top);
692 
693 	if (unlikely(stack_top < mmap_min_addr) ||
694 	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
695 		return -ENOMEM;
696 
697 	stack_shift = vma->vm_end - stack_top;
698 
699 	bprm->p -= stack_shift;
700 	mm->arg_start = bprm->p;
701 #endif
702 
703 	if (bprm->loader)
704 		bprm->loader -= stack_shift;
705 	bprm->exec -= stack_shift;
706 
707 	down_write(&mm->mmap_sem);
708 	vm_flags = VM_STACK_FLAGS;
709 
710 	/*
711 	 * Adjust stack execute permissions; explicitly enable for
712 	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
713 	 * (arch default) otherwise.
714 	 */
715 	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
716 		vm_flags |= VM_EXEC;
717 	else if (executable_stack == EXSTACK_DISABLE_X)
718 		vm_flags &= ~VM_EXEC;
719 	vm_flags |= mm->def_flags;
720 	vm_flags |= VM_STACK_INCOMPLETE_SETUP;
721 
722 	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
723 			vm_flags);
724 	if (ret)
725 		goto out_unlock;
726 	BUG_ON(prev != vma);
727 
728 	/* Move stack pages down in memory. */
729 	if (stack_shift) {
730 		ret = shift_arg_pages(vma, stack_shift);
731 		if (ret)
732 			goto out_unlock;
733 	}
734 
735 	/* mprotect_fixup is overkill to remove the temporary stack flags */
736 	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
737 
738 	stack_expand = 131072UL; /* arbitrarily 32*4k (or 2*64k) pages */
739 	stack_size = vma->vm_end - vma->vm_start;
740 	/*
741 	 * Align this down to a page boundary as expand_stack
742 	 * will align it up.
743 	 */
744 	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
745 #ifdef CONFIG_STACK_GROWSUP
746 	if (stack_size + stack_expand > rlim_stack)
747 		stack_base = vma->vm_start + rlim_stack;
748 	else
749 		stack_base = vma->vm_end + stack_expand;
750 #else
751 	if (stack_size + stack_expand > rlim_stack)
752 		stack_base = vma->vm_end - rlim_stack;
753 	else
754 		stack_base = vma->vm_start - stack_expand;
755 #endif
756 	current->mm->start_stack = bprm->p;
757 	ret = expand_stack(vma, stack_base);
758 	if (ret)
759 		ret = -EFAULT;
760 
761 out_unlock:
762 	up_write(&mm->mmap_sem);
763 	return ret;
764 }
765 EXPORT_SYMBOL(setup_arg_pages);
766 
767 #endif /* CONFIG_MMU */
768 
769 struct file *open_exec(const char *name)
770 {
771 	struct file *file;
772 	int err;
773 	static const struct open_flags open_exec_flags = {
774 		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
775 		.acc_mode = MAY_EXEC | MAY_OPEN,
776 		.intent = LOOKUP_OPEN
777 	};
778 
779 	file = do_filp_open(AT_FDCWD, name, &open_exec_flags, LOOKUP_FOLLOW);
780 	if (IS_ERR(file))
781 		goto out;
782 
783 	err = -EACCES;
784 	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
785 		goto exit;
786 
787 	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
788 		goto exit;
789 
790 	fsnotify_open(file);
791 
792 	err = deny_write_access(file);
793 	if (err)
794 		goto exit;
795 
796 out:
797 	return file;
798 
799 exit:
800 	fput(file);
801 	return ERR_PTR(err);
802 }
803 EXPORT_SYMBOL(open_exec);
804 
805 int kernel_read(struct file *file, loff_t offset,
806 		char *addr, unsigned long count)
807 {
808 	mm_segment_t old_fs;
809 	loff_t pos = offset;
810 	int result;
811 
812 	old_fs = get_fs();
813 	set_fs(get_ds());
814 	/* The cast to a user pointer is valid due to the set_fs() */
815 	result = vfs_read(file, (void __user *)addr, count, &pos);
816 	set_fs(old_fs);
817 	return result;
818 }
819 
820 EXPORT_SYMBOL(kernel_read);
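
/*
 * Sketch of the typical kernel_read() calling pattern (sniff_header is
 * hypothetical; prepare_binprm() below does the real equivalent): read
 * the first BINPRM_BUF_SIZE bytes of an already-opened file into a
 * zeroed buffer so a binfmt can sniff the header.
 */
static int __maybe_unused sniff_header(struct file *file, char *buf)
{
	memset(buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(file, 0, buf, BINPRM_BUF_SIZE);
}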
821 
822 static int exec_mmap(struct mm_struct *mm)
823 {
824 	struct task_struct *tsk;
825 	struct mm_struct *old_mm, *active_mm;
826 
827 	/* Notify parent that we're no longer interested in the old VM */
828 	tsk = current;
829 	old_mm = current->mm;
830 	sync_mm_rss(tsk, old_mm);
831 	mm_release(tsk, old_mm);
832 
833 	if (old_mm) {
834 		/*
835 		 * Make sure that if there is a core dump in progress
836 		 * for the old mm, we get out and die instead of going
837 		 * through with the exec.  We must hold mmap_sem around
838 		 * checking core_state and changing tsk->mm.
839 		 */
840 		down_read(&old_mm->mmap_sem);
841 		if (unlikely(old_mm->core_state)) {
842 			up_read(&old_mm->mmap_sem);
843 			return -EINTR;
844 		}
845 	}
846 	task_lock(tsk);
847 	active_mm = tsk->active_mm;
848 	tsk->mm = mm;
849 	tsk->active_mm = mm;
850 	activate_mm(active_mm, mm);
851 	if (old_mm && tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
852 		atomic_dec(&old_mm->oom_disable_count);
853 		atomic_inc(&tsk->mm->oom_disable_count);
854 	}
855 	task_unlock(tsk);
856 	arch_pick_mmap_layout(mm);
857 	if (old_mm) {
858 		up_read(&old_mm->mmap_sem);
859 		BUG_ON(active_mm != old_mm);
860 		mm_update_next_owner(old_mm);
861 		mmput(old_mm);
862 		return 0;
863 	}
864 	mmdrop(active_mm);
865 	return 0;
866 }
867 
868 /*
869  * This function makes sure the current process has its own signal table,
870  * so that flush_signal_handlers can later reset the handlers without
871  * disturbing other processes.  (Other processes might share the signal
872  * table via the CLONE_SIGHAND option to clone().)
873  */
874 static int de_thread(struct task_struct *tsk)
875 {
876 	struct signal_struct *sig = tsk->signal;
877 	struct sighand_struct *oldsighand = tsk->sighand;
878 	spinlock_t *lock = &oldsighand->siglock;
879 
880 	if (thread_group_empty(tsk))
881 		goto no_thread_group;
882 
883 	/*
884 	 * Kill all other threads in the thread group.
885 	 */
886 	spin_lock_irq(lock);
887 	if (signal_group_exit(sig)) {
888 		/*
889 		 * Another group action in progress, just
890 		 * return so that the signal is processed.
891 		 */
892 		spin_unlock_irq(lock);
893 		return -EAGAIN;
894 	}
895 
896 	sig->group_exit_task = tsk;
897 	sig->notify_count = zap_other_threads(tsk);
898 	if (!thread_group_leader(tsk))
899 		sig->notify_count--;
900 
901 	while (sig->notify_count) {
902 		__set_current_state(TASK_UNINTERRUPTIBLE);
903 		spin_unlock_irq(lock);
904 		schedule();
905 		spin_lock_irq(lock);
906 	}
907 	spin_unlock_irq(lock);
908 
909 	/*
910 	 * At this point all other threads have exited, all we have to
911 	 * do is to wait for the thread group leader to become inactive,
912 	 * and to assume its PID:
913 	 */
914 	if (!thread_group_leader(tsk)) {
915 		struct task_struct *leader = tsk->group_leader;
916 
917 		sig->notify_count = -1;	/* for exit_notify() */
918 		for (;;) {
919 			write_lock_irq(&tasklist_lock);
920 			if (likely(leader->exit_state))
921 				break;
922 			__set_current_state(TASK_UNINTERRUPTIBLE);
923 			write_unlock_irq(&tasklist_lock);
924 			schedule();
925 		}
926 
927 		/*
928 		 * The only record we have of the real-time age of a
929 		 * process, regardless of execs it's done, is start_time.
930 		 * All the past CPU time is accumulated in signal_struct
931 		 * from sister threads now dead.  But in this non-leader
932 		 * exec, nothing survives from the original leader thread,
933 		 * whose birth marks the true age of this process now.
934 		 * When we take on its identity by switching to its PID, we
935 		 * also take its birthdate (always earlier than our own).
936 		 */
937 		tsk->start_time = leader->start_time;
938 
939 		BUG_ON(!same_thread_group(leader, tsk));
940 		BUG_ON(has_group_leader_pid(tsk));
941 		/*
942 		 * An exec() starts a new thread group with the
943 		 * TGID of the previous thread group. Rehash the
944 		 * two threads with a switched PID, and release
945 		 * the former thread group leader:
946 		 */
947 
948 		/* Become a process group leader with the old leader's pid.
949 		 * The old leader becomes a thread of this thread group.
950 		 * Note: The old leader also uses this pid until release_task
951 		 *       is called.  Odd but simple and correct.
952 		 */
953 		detach_pid(tsk, PIDTYPE_PID);
954 		tsk->pid = leader->pid;
955 		attach_pid(tsk, PIDTYPE_PID,  task_pid(leader));
956 		transfer_pid(leader, tsk, PIDTYPE_PGID);
957 		transfer_pid(leader, tsk, PIDTYPE_SID);
958 
959 		list_replace_rcu(&leader->tasks, &tsk->tasks);
960 		list_replace_init(&leader->sibling, &tsk->sibling);
961 
962 		tsk->group_leader = tsk;
963 		leader->group_leader = tsk;
964 
965 		tsk->exit_signal = SIGCHLD;
966 
967 		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
968 		leader->exit_state = EXIT_DEAD;
969 		write_unlock_irq(&tasklist_lock);
970 
971 		release_task(leader);
972 	}
973 
974 	sig->group_exit_task = NULL;
975 	sig->notify_count = 0;
976 
977 no_thread_group:
978 	if (current->mm)
979 		setmax_mm_hiwater_rss(&sig->maxrss, current->mm);
980 
981 	exit_itimers(sig);
982 	flush_itimer_signals();
983 
984 	if (atomic_read(&oldsighand->count) != 1) {
985 		struct sighand_struct *newsighand;
986 		/*
987 		 * This ->sighand is shared with a CLONE_SIGHAND
988 		 * but not CLONE_THREAD task; switch to a new one.
989 		 */
990 		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
991 		if (!newsighand)
992 			return -ENOMEM;
993 
994 		atomic_set(&newsighand->count, 1);
995 		memcpy(newsighand->action, oldsighand->action,
996 		       sizeof(newsighand->action));
997 
998 		write_lock_irq(&tasklist_lock);
999 		spin_lock(&oldsighand->siglock);
1000 		rcu_assign_pointer(tsk->sighand, newsighand);
1001 		spin_unlock(&oldsighand->siglock);
1002 		write_unlock_irq(&tasklist_lock);
1003 
1004 		__cleanup_sighand(oldsighand);
1005 	}
1006 
1007 	BUG_ON(!thread_group_leader(tsk));
1008 	return 0;
1009 }
1010 
1011 /*
1012  * These functions flush out all traces of the currently running executable
1013  * so that a new one can be started.
1014  */
1015 static void flush_old_files(struct files_struct *files)
1016 {
1017 	long j = -1;
1018 	struct fdtable *fdt;
1019 
1020 	spin_lock(&files->file_lock);
1021 	for (;;) {
1022 		unsigned long set, i;
1023 
1024 		j++;
1025 		i = j * __NFDBITS;
1026 		fdt = files_fdtable(files);
1027 		if (i >= fdt->max_fds)
1028 			break;
1029 		set = fdt->close_on_exec->fds_bits[j];
1030 		if (!set)
1031 			continue;
1032 		fdt->close_on_exec->fds_bits[j] = 0;
1033 		spin_unlock(&files->file_lock);
1034 		for (; set; i++, set >>= 1) {
1035 			if (set & 1) {
1036 				sys_close(i);
1037 			}
1038 		}
1039 		spin_lock(&files->file_lock);
1040 
1041 	}
1042 	spin_unlock(&files->file_lock);
1043 }
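
/*
 * Userspace counterpart of the close-on-exec sweep above (illustrative
 * snippet, not kernel code, so it sits under #if 0): a descriptor with
 * FD_CLOEXEC set is exactly what flush_old_files() closes during exec.
 */
#if 0
#include <fcntl.h>

int mark_cloexec(int fd)
{
	int flags = fcntl(fd, F_GETFD);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
#endif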
1044 
1045 char *get_task_comm(char *buf, struct task_struct *tsk)
1046 {
1047 	/* buf must be at least sizeof(tsk->comm) in size */
1048 	task_lock(tsk);
1049 	strncpy(buf, tsk->comm, sizeof(tsk->comm));
1050 	task_unlock(tsk);
1051 	return buf;
1052 }
1053 EXPORT_SYMBOL_GPL(get_task_comm);
1054 
1055 void set_task_comm(struct task_struct *tsk, char *buf)
1056 {
1057 	task_lock(tsk);
1058 
1059 	/*
1060 	 * Threads may access current->comm without holding
1061 	 * the task lock, so write the string carefully.
1062 	 * Readers without a lock may see incomplete new
1063 	 * names but are safe from non-terminating string reads.
1064 	 */
1065 	memset(tsk->comm, 0, TASK_COMM_LEN);
1066 	wmb();
1067 	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1068 	task_unlock(tsk);
1069 	perf_event_comm(tsk);
1070 }
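
/*
 * Sketch of the expected calling convention for get_task_comm() (the
 * example_print_comm helper is hypothetical): callers must supply a
 * buffer of at least TASK_COMM_LEN bytes, never a shorter one.
 */
static void __maybe_unused example_print_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	printk(KERN_DEBUG "task %d comm: %s\n", task_pid_nr(tsk), comm);
}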
1071 
1072 int flush_old_exec(struct linux_binprm *bprm)
1073 {
1074 	int retval;
1075 
1076 	/*
1077 	 * Make sure we have a private signal table and that
1078 	 * we are unassociated from the previous thread group.
1079 	 */
1080 	retval = de_thread(current);
1081 	if (retval)
1082 		goto out;
1083 
1084 	set_mm_exe_file(bprm->mm, bprm->file);
1085 
1086 	/*
1087 	 * Release all of the old mmap stuff
1088 	 */
1089 	acct_arg_size(bprm, 0);
1090 	retval = exec_mmap(bprm->mm);
1091 	if (retval)
1092 		goto out;
1093 
1094 	bprm->mm = NULL;		/* We're using it now */
1095 
1096 	current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
1097 	flush_thread();
1098 	current->personality &= ~bprm->per_clear;
1099 
1100 	return 0;
1101 
1102 out:
1103 	return retval;
1104 }
1105 EXPORT_SYMBOL(flush_old_exec);
1106 
1107 void setup_new_exec(struct linux_binprm *bprm)
1108 {
1109 	int i, ch;
1110 	const char *name;
1111 	char tcomm[sizeof(current->comm)];
1112 
1113 	arch_pick_mmap_layout(current->mm);
1114 
1115 	/* This is the point of no return */
1116 	current->sas_ss_sp = current->sas_ss_size = 0;
1117 
1118 	if (current_euid() == current_uid() && current_egid() == current_gid())
1119 		set_dumpable(current->mm, 1);
1120 	else
1121 		set_dumpable(current->mm, suid_dumpable);
1122 
1123 	name = bprm->filename;
1124 
1125 	/* Copy the binary name from after the last slash */
1126 	for (i = 0; (ch = *(name++)) != '\0';) {
1127 		if (ch == '/')
1128 			i = 0; /* overwrite what we wrote */
1129 		else
1130 			if (i < (sizeof(tcomm) - 1))
1131 				tcomm[i++] = ch;
1132 	}
1133 	tcomm[i] = '\0';
1134 	set_task_comm(current, tcomm);
1135 
1136 	/* Set the new mm task size. We have to do that late because it may
1137 	 * depend on TIF_32BIT which is only updated in flush_thread() on
1138 	 * some architectures like powerpc.
1139 	 */
1140 	current->mm->task_size = TASK_SIZE;
1141 
1142 	/* install the new credentials */
1143 	if (bprm->cred->uid != current_euid() ||
1144 	    bprm->cred->gid != current_egid()) {
1145 		current->pdeath_signal = 0;
1146 	} else if (file_permission(bprm->file, MAY_READ) ||
1147 		   bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) {
1148 		set_dumpable(current->mm, suid_dumpable);
1149 	}
1150 
1151 	/*
1152 	 * Flush performance counters when crossing a
1153 	 * security domain:
1154 	 */
1155 	if (!get_dumpable(current->mm))
1156 		perf_event_exit_task(current);
1157 
1158 	/* An exec changes our domain. We are no longer part of the thread
1159 	   group */
1160 
1161 	current->self_exec_id++;
1162 
1163 	flush_signal_handlers(current, 0);
1164 	flush_old_files(current->files);
1165 }
1166 EXPORT_SYMBOL(setup_new_exec);
1167 
1168 /*
1169  * Prepare credentials and lock ->cred_guard_mutex.
1170  * install_exec_creds() commits the new creds and drops the lock.
1171  * Or, if exec fails before that, free_bprm() should release ->cred
1172  * and unlock.
1173  */
1174 int prepare_bprm_creds(struct linux_binprm *bprm)
1175 {
1176 	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1177 		return -ERESTARTNOINTR;
1178 
1179 	bprm->cred = prepare_exec_creds();
1180 	if (likely(bprm->cred))
1181 		return 0;
1182 
1183 	mutex_unlock(&current->signal->cred_guard_mutex);
1184 	return -ENOMEM;
1185 }
1186 
1187 void free_bprm(struct linux_binprm *bprm)
1188 {
1189 	free_arg_pages(bprm);
1190 	if (bprm->cred) {
1191 		mutex_unlock(&current->signal->cred_guard_mutex);
1192 		abort_creds(bprm->cred);
1193 	}
1194 	kfree(bprm);
1195 }
1196 
1197 /*
1198  * install the new credentials for this executable
1199  */
1200 void install_exec_creds(struct linux_binprm *bprm)
1201 {
1202 	security_bprm_committing_creds(bprm);
1203 
1204 	commit_creds(bprm->cred);
1205 	bprm->cred = NULL;
1206 	/*
1207 	 * cred_guard_mutex must be held at least to this point to prevent
1208 	 * ptrace_attach() from altering our determination of the task's
1209 	 * credentials; any time after this it may be unlocked.
1210 	 */
1211 	security_bprm_committed_creds(bprm);
1212 	mutex_unlock(&current->signal->cred_guard_mutex);
1213 }
1214 EXPORT_SYMBOL(install_exec_creds);
1215 
1216 /*
1217  * determine how safe it is to execute the proposed program
1218  * - the caller must hold ->cred_guard_mutex to protect against
1219  *   PTRACE_ATTACH
1220  */
1221 int check_unsafe_exec(struct linux_binprm *bprm)
1222 {
1223 	struct task_struct *p = current, *t;
1224 	unsigned n_fs;
1225 	int res = 0;
1226 
1227 	bprm->unsafe = tracehook_unsafe_exec(p);
1228 
1229 	n_fs = 1;
1230 	spin_lock(&p->fs->lock);
1231 	rcu_read_lock();
1232 	for (t = next_thread(p); t != p; t = next_thread(t)) {
1233 		if (t->fs == p->fs)
1234 			n_fs++;
1235 	}
1236 	rcu_read_unlock();
1237 
1238 	if (p->fs->users > n_fs) {
1239 		bprm->unsafe |= LSM_UNSAFE_SHARE;
1240 	} else {
1241 		res = -EAGAIN;
1242 		if (!p->fs->in_exec) {
1243 			p->fs->in_exec = 1;
1244 			res = 1;
1245 		}
1246 	}
1247 	spin_unlock(&p->fs->lock);
1248 
1249 	return res;
1250 }
1251 
1252 /*
1253  * Fill the binprm structure from the inode.
1254  * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
1255  *
1256  * This may be called multiple times for binary chains (scripts for example).
1257  */
1258 int prepare_binprm(struct linux_binprm *bprm)
1259 {
1260 	umode_t mode;
1261 	struct inode *inode = bprm->file->f_path.dentry->d_inode;
1262 	int retval;
1263 
1264 	mode = inode->i_mode;
1265 	if (bprm->file->f_op == NULL)
1266 		return -EACCES;
1267 
1268 	/* clear any previous set[ug]id data from a previous binary */
1269 	bprm->cred->euid = current_euid();
1270 	bprm->cred->egid = current_egid();
1271 
1272 	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
1273 		/* Set-uid? */
1274 		if (mode & S_ISUID) {
1275 			bprm->per_clear |= PER_CLEAR_ON_SETID;
1276 			bprm->cred->euid = inode->i_uid;
1277 		}
1278 
1279 		/* Set-gid? */
1280 		/*
1281 		 * If setgid is set but no group execute bit then this
1282 		 * is a candidate for mandatory locking, not a setgid
1283 		 * executable.
1284 		 */
1285 		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1286 			bprm->per_clear |= PER_CLEAR_ON_SETID;
1287 			bprm->cred->egid = inode->i_gid;
1288 		}
1289 	}
1290 
1291 	/* fill in binprm security blob */
1292 	retval = security_bprm_set_creds(bprm);
1293 	if (retval)
1294 		return retval;
1295 	bprm->cred_prepared = 1;
1296 
1297 	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1298 	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
1299 }
1300 
1301 EXPORT_SYMBOL(prepare_binprm);
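
/*
 * Worked example for the setid logic above (values illustrative): an
 * executable with mode 04755 owned by uid 0, on a mount without
 * MNT_NOSUID, reaches security_bprm_set_creds() with
 * bprm->cred->euid == 0 and PER_CLEAR_ON_SETID queued in
 * bprm->per_clear; the same file on a nosuid mount keeps
 * euid == current_euid().
 */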
1302 
1303 /*
1304  * Arguments are '\0' separated strings found at the location bprm->p
1305  * points to; chop off the first by relocating bprm->p to right after
1306  * the first '\0' encountered.
1307  */
1308 int remove_arg_zero(struct linux_binprm *bprm)
1309 {
1310 	int ret = 0;
1311 	unsigned long offset;
1312 	char *kaddr;
1313 	struct page *page;
1314 
1315 	if (!bprm->argc)
1316 		return 0;
1317 
1318 	do {
1319 		offset = bprm->p & ~PAGE_MASK;
1320 		page = get_arg_page(bprm, bprm->p, 0);
1321 		if (!page) {
1322 			ret = -EFAULT;
1323 			goto out;
1324 		}
1325 		kaddr = kmap_atomic(page, KM_USER0);
1326 
1327 		for (; offset < PAGE_SIZE && kaddr[offset];
1328 				offset++, bprm->p++)
1329 			;
1330 
1331 		kunmap_atomic(kaddr, KM_USER0);
1332 		put_arg_page(page);
1333 
1334 		if (offset == PAGE_SIZE)
1335 			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
1336 	} while (offset == PAGE_SIZE);
1337 
1338 	bprm->p++;
1339 	bprm->argc--;
1340 	ret = 0;
1341 
1342 out:
1343 	return ret;
1344 }
1345 EXPORT_SYMBOL(remove_arg_zero);
1346 
1347 /*
1348  * cycle through the list of binary format handlers until one recognizes the image
1349  */
1350 int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
1351 {
1352 	unsigned int depth = bprm->recursion_depth;
1353 	int try, retval;
1354 	struct linux_binfmt *fmt;
1355 
1356 	retval = security_bprm_check(bprm);
1357 	if (retval)
1358 		return retval;
1359 
1360 	/* kernel module loader fixup */
1361 	/* so we don't try to run modprobe in kernel space. */
1362 	set_fs(USER_DS);
1363 
1364 	retval = audit_bprm(bprm);
1365 	if (retval)
1366 		return retval;
1367 
1368 	retval = -ENOENT;
1369 	for (try = 0; try < 2; try++) {
1370 		read_lock(&binfmt_lock);
1371 		list_for_each_entry(fmt, &formats, lh) {
1372 			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
1373 			if (!fn)
1374 				continue;
1375 			if (!try_module_get(fmt->module))
1376 				continue;
1377 			read_unlock(&binfmt_lock);
1378 			retval = fn(bprm, regs);
1379 			/*
1380 			 * Restore the depth counter to its starting value
1381 			 * in this call, so we don't have to rely on every
1382 			 * load_binary function to restore it on return.
1383 			 */
1384 			bprm->recursion_depth = depth;
1385 			if (retval >= 0) {
1386 				if (depth == 0)
1387 					tracehook_report_exec(fmt, bprm, regs);
1388 				put_binfmt(fmt);
1389 				allow_write_access(bprm->file);
1390 				if (bprm->file)
1391 					fput(bprm->file);
1392 				bprm->file = NULL;
1393 				current->did_exec = 1;
1394 				proc_exec_connector(current);
1395 				return retval;
1396 			}
1397 			read_lock(&binfmt_lock);
1398 			put_binfmt(fmt);
1399 			if (retval != -ENOEXEC || bprm->mm == NULL)
1400 				break;
1401 			if (!bprm->file) {
1402 				read_unlock(&binfmt_lock);
1403 				return retval;
1404 			}
1405 		}
1406 		read_unlock(&binfmt_lock);
1407 		if (retval != -ENOEXEC || bprm->mm == NULL) {
1408 			break;
1409 #ifdef CONFIG_MODULES
1410 		} else {
1411 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1412 			if (printable(bprm->buf[0]) &&
1413 			    printable(bprm->buf[1]) &&
1414 			    printable(bprm->buf[2]) &&
1415 			    printable(bprm->buf[3]))
1416 				break; /* -ENOEXEC */
1417 			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
1418 #endif
1419 		}
1420 	}
1421 	return retval;
1422 }
1423 
1424 EXPORT_SYMBOL(search_binary_handler);
1425 
1426 /*
1427  * sys_execve() executes a new program.
1428  */
1429 static int do_execve_common(const char *filename,
1430 				struct user_arg_ptr argv,
1431 				struct user_arg_ptr envp,
1432 				struct pt_regs *regs)
1433 {
1434 	struct linux_binprm *bprm;
1435 	struct file *file;
1436 	struct files_struct *displaced;
1437 	bool clear_in_exec;
1438 	int retval;
1439 
1440 	retval = unshare_files(&displaced);
1441 	if (retval)
1442 		goto out_ret;
1443 
1444 	retval = -ENOMEM;
1445 	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1446 	if (!bprm)
1447 		goto out_files;
1448 
1449 	retval = prepare_bprm_creds(bprm);
1450 	if (retval)
1451 		goto out_free;
1452 
1453 	retval = check_unsafe_exec(bprm);
1454 	if (retval < 0)
1455 		goto out_free;
1456 	clear_in_exec = retval;
1457 	current->in_execve = 1;
1458 
1459 	file = open_exec(filename);
1460 	retval = PTR_ERR(file);
1461 	if (IS_ERR(file))
1462 		goto out_unmark;
1463 
1464 	sched_exec();
1465 
1466 	bprm->file = file;
1467 	bprm->filename = filename;
1468 	bprm->interp = filename;
1469 
1470 	retval = bprm_mm_init(bprm);
1471 	if (retval)
1472 		goto out_file;
1473 
1474 	bprm->argc = count(argv, MAX_ARG_STRINGS);
1475 	if ((retval = bprm->argc) < 0)
1476 		goto out;
1477 
1478 	bprm->envc = count(envp, MAX_ARG_STRINGS);
1479 	if ((retval = bprm->envc) < 0)
1480 		goto out;
1481 
1482 	retval = prepare_binprm(bprm);
1483 	if (retval < 0)
1484 		goto out;
1485 
1486 	retval = copy_strings_kernel(1, &bprm->filename, bprm);
1487 	if (retval < 0)
1488 		goto out;
1489 
1490 	bprm->exec = bprm->p;
1491 	retval = copy_strings(bprm->envc, envp, bprm);
1492 	if (retval < 0)
1493 		goto out;
1494 
1495 	retval = copy_strings(bprm->argc, argv, bprm);
1496 	if (retval < 0)
1497 		goto out;
1498 
1499 	retval = search_binary_handler(bprm, regs);
1500 	if (retval < 0)
1501 		goto out;
1502 
1503 	/* execve succeeded */
1504 	current->fs->in_exec = 0;
1505 	current->in_execve = 0;
1506 	acct_update_integrals(current);
1507 	free_bprm(bprm);
1508 	if (displaced)
1509 		put_files_struct(displaced);
1510 	return retval;
1511 
1512 out:
1513 	if (bprm->mm) {
1514 		acct_arg_size(bprm, 0);
1515 		mmput(bprm->mm);
1516 	}
1517 
1518 out_file:
1519 	if (bprm->file) {
1520 		allow_write_access(bprm->file);
1521 		fput(bprm->file);
1522 	}
1523 
1524 out_unmark:
1525 	if (clear_in_exec)
1526 		current->fs->in_exec = 0;
1527 	current->in_execve = 0;
1528 
1529 out_free:
1530 	free_bprm(bprm);
1531 
1532 out_files:
1533 	if (displaced)
1534 		reset_files_struct(displaced);
1535 out_ret:
1536 	return retval;
1537 }
1538 
1539 int do_execve(const char *filename,
1540 	const char __user *const __user *__argv,
1541 	const char __user *const __user *__envp,
1542 	struct pt_regs *regs)
1543 {
1544 	struct user_arg_ptr argv = { .ptr.native = __argv };
1545 	struct user_arg_ptr envp = { .ptr.native = __envp };
1546 	return do_execve_common(filename, argv, envp, regs);
1547 }
1548 
1549 #ifdef CONFIG_COMPAT
1550 int compat_do_execve(char *filename,
1551 	compat_uptr_t __user *__argv,
1552 	compat_uptr_t __user *__envp,
1553 	struct pt_regs *regs)
1554 {
1555 	struct user_arg_ptr argv = {
1556 		.is_compat = true,
1557 		.ptr.compat = __argv,
1558 	};
1559 	struct user_arg_ptr envp = {
1560 		.is_compat = true,
1561 		.ptr.compat = __envp,
1562 	};
1563 	return do_execve_common(filename, argv, envp, regs);
1564 }
1565 #endif
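
/*
 * Userspace view of the execve plumbing above (a separate illustrative
 * program under #if 0): the native argv/envp arrays passed to execve(2)
 * are what do_execve() receives from each architecture's sys_execve().
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char *argv[] = { "/bin/echo", "hello from execve", NULL };
	char *envp[] = { "GREETING=hi", NULL };

	execve("/bin/echo", argv, envp);
	/* execve() only returns on failure. */
	fprintf(stderr, "execve: %s\n", strerror(errno));
	return 1;
}
#endif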
1566 
1567 void set_binfmt(struct linux_binfmt *new)
1568 {
1569 	struct mm_struct *mm = current->mm;
1570 
1571 	if (mm->binfmt)
1572 		module_put(mm->binfmt->module);
1573 
1574 	mm->binfmt = new;
1575 	if (new)
1576 		__module_get(new->module);
1577 }
1578 
1579 EXPORT_SYMBOL(set_binfmt);
1580 
1581 static int expand_corename(struct core_name *cn)
1582 {
1583 	char *old_corename = cn->corename;
1584 
1585 	cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
1586 	cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
1587 
1588 	if (!cn->corename) {
1589 		kfree(old_corename);
1590 		return -ENOMEM;
1591 	}
1592 
1593 	return 0;
1594 }
1595 
1596 static int cn_printf(struct core_name *cn, const char *fmt, ...)
1597 {
1598 	char *cur;
1599 	int need;
1600 	int ret;
1601 	va_list arg;
1602 
1603 	va_start(arg, fmt);
1604 	need = vsnprintf(NULL, 0, fmt, arg);
1605 	va_end(arg);
1606 
1607 	if (likely(need < cn->size - cn->used - 1))
1608 		goto out_printf;
1609 
1610 	ret = expand_corename(cn);
1611 	if (ret)
1612 		goto expand_fail;
1613 
1614 out_printf:
1615 	cur = cn->corename + cn->used;
1616 	va_start(arg, fmt);
1617 	vsnprintf(cur, need + 1, fmt, arg);
1618 	va_end(arg);
1619 	cn->used += need;
1620 	return 0;
1621 
1622 expand_fail:
1623 	return ret;
1624 }
1625 
1626 /* format_corename will inspect the pattern parameter, and output a
1627  * name into corename, which must have space for at least
1628  * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
1629  */
1630 static int format_corename(struct core_name *cn, long signr)
1631 {
1632 	const struct cred *cred = current_cred();
1633 	const char *pat_ptr = core_pattern;
1634 	int ispipe = (*pat_ptr == '|');
1635 	int pid_in_pattern = 0;
1636 	int err = 0;
1637 
1638 	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
1639 	cn->corename = kmalloc(cn->size, GFP_KERNEL);
1640 	cn->used = 0;
1641 
1642 	if (!cn->corename)
1643 		return -ENOMEM;
1644 
1645 	/* Repeat as long as we have more pattern to process and more output
1646 	   space */
1647 	while (*pat_ptr) {
1648 		if (*pat_ptr != '%') {
1649 			if (*pat_ptr == 0)
1650 				goto out;
1651 			err = cn_printf(cn, "%c", *pat_ptr++);
1652 		} else {
1653 			switch (*++pat_ptr) {
1654 			/* single % at the end, drop that */
1655 			case 0:
1656 				goto out;
1657 			/* Double percent, output one percent */
1658 			case '%':
1659 				err = cn_printf(cn, "%c", '%');
1660 				break;
1661 			/* pid */
1662 			case 'p':
1663 				pid_in_pattern = 1;
1664 				err = cn_printf(cn, "%d",
1665 					      task_tgid_vnr(current));
1666 				break;
1667 			/* uid */
1668 			case 'u':
1669 				err = cn_printf(cn, "%d", cred->uid);
1670 				break;
1671 			/* gid */
1672 			case 'g':
1673 				err = cn_printf(cn, "%d", cred->gid);
1674 				break;
1675 			/* signal that caused the coredump */
1676 			case 's':
1677 				err = cn_printf(cn, "%ld", signr);
1678 				break;
1679 			/* UNIX time of coredump */
1680 			case 't': {
1681 				struct timeval tv;
1682 				do_gettimeofday(&tv);
1683 				err = cn_printf(cn, "%lu", tv.tv_sec);
1684 				break;
1685 			}
1686 			/* hostname */
1687 			case 'h':
1688 				down_read(&uts_sem);
1689 				err = cn_printf(cn, "%s",
1690 					      utsname()->nodename);
1691 				up_read(&uts_sem);
1692 				break;
1693 			/* executable */
1694 			case 'e':
1695 				err = cn_printf(cn, "%s", current->comm);
1696 				break;
1697 			/* core limit size */
1698 			case 'c':
1699 				err = cn_printf(cn, "%lu",
1700 					      rlimit(RLIMIT_CORE));
1701 				break;
1702 			default:
1703 				break;
1704 			}
1705 			++pat_ptr;
1706 		}
1707 
1708 		if (err)
1709 			return err;
1710 	}
1711 
1712 	/* Backward compatibility with core_uses_pid:
1713 	 *
1714 	 * If core_pattern does not include a %p (as is the default)
1715 	 * and core_uses_pid is set, then .%pid will be appended to
1716 	 * the filename. Do not do this for piped commands. */
1717 	if (!ispipe && !pid_in_pattern && core_uses_pid) {
1718 		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
1719 		if (err)
1720 			return err;
1721 	}
1722 out:
1723 	return ispipe;
1724 }
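
/*
 * Example expansions of the specifiers handled above (values are
 * illustrative): with core_pattern = "core.%e.%p.%t", a crash of
 * "myapp" with tgid 1234 at time 1300000000 produces
 * "core.myapp.1234.1300000000".  With core_pattern =
 * "|/usr/lib/helper %p %s", ispipe is returned true and the helper
 * program receives the pid and signal number as its arguments.
 */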
1725 
1726 static int zap_process(struct task_struct *start, int exit_code)
1727 {
1728 	struct task_struct *t;
1729 	int nr = 0;
1730 
1731 	start->signal->flags = SIGNAL_GROUP_EXIT;
1732 	start->signal->group_exit_code = exit_code;
1733 	start->signal->group_stop_count = 0;
1734 
1735 	t = start;
1736 	do {
1737 		task_clear_group_stop_pending(t);
1738 		if (t != current && t->mm) {
1739 			sigaddset(&t->pending.signal, SIGKILL);
1740 			signal_wake_up(t, 1);
1741 			nr++;
1742 		}
1743 	} while_each_thread(start, t);
1744 
1745 	return nr;
1746 }
1747 
1748 static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
1749 				struct core_state *core_state, int exit_code)
1750 {
1751 	struct task_struct *g, *p;
1752 	unsigned long flags;
1753 	int nr = -EAGAIN;
1754 
1755 	spin_lock_irq(&tsk->sighand->siglock);
1756 	if (!signal_group_exit(tsk->signal)) {
1757 		mm->core_state = core_state;
1758 		nr = zap_process(tsk, exit_code);
1759 	}
1760 	spin_unlock_irq(&tsk->sighand->siglock);
1761 	if (unlikely(nr < 0))
1762 		return nr;
1763 
1764 	if (atomic_read(&mm->mm_users) == nr + 1)
1765 		goto done;
1766 	/*
1767 	 * We should find and kill all tasks which use this mm, and we should
1768 	 * count them correctly into ->nr_threads. We don't take tasklist
1769 	 * lock, but this is safe wrt:
1770 	 *
1771 	 * fork:
1772 	 *	None of sub-threads can fork after zap_process(leader). All
1773 	 *	processes which were created before this point should be
1774 	 *	visible to zap_threads() because copy_process() adds the new
1775 	 *	process to the tail of init_task.tasks list, and lock/unlock
1776 	 *	of ->siglock provides a memory barrier.
1777 	 *
1778 	 * do_exit:
1779 	 *	The caller holds mm->mmap_sem. This means that the task which
1780 	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
1781 	 *	its ->mm.
1782 	 *
1783 	 * de_thread:
1784 	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
1785 	 *	we must see either old or new leader, this does not matter.
1786 	 *	However, it can change p->sighand, so lock_task_sighand(p)
1787 	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
1788 	 *	it can't fail.
1789 	 *
1790 	 *	Note also that "g" can be the old leader with ->mm == NULL
1791 	 *	and already unhashed and thus removed from ->thread_group.
1792 	 *	This is OK, __unhash_process()->list_del_rcu() does not
1793 	 *	clear the ->next pointer, we will find the new leader via
1794 	 *	next_thread().
1795 	 */
1796 	rcu_read_lock();
1797 	for_each_process(g) {
1798 		if (g == tsk->group_leader)
1799 			continue;
1800 		if (g->flags & PF_KTHREAD)
1801 			continue;
1802 		p = g;
1803 		do {
1804 			if (p->mm) {
1805 				if (unlikely(p->mm == mm)) {
1806 					lock_task_sighand(p, &flags);
1807 					nr += zap_process(p, exit_code);
1808 					unlock_task_sighand(p, &flags);
1809 				}
1810 				break;
1811 			}
1812 		} while_each_thread(g, p);
1813 	}
1814 	rcu_read_unlock();
1815 done:
1816 	atomic_set(&core_state->nr_threads, nr);
1817 	return nr;
1818 }
1819 
1820 static int coredump_wait(int exit_code, struct core_state *core_state)
1821 {
1822 	struct task_struct *tsk = current;
1823 	struct mm_struct *mm = tsk->mm;
1824 	struct completion *vfork_done;
1825 	int core_waiters = -EBUSY;
1826 
1827 	init_completion(&core_state->startup);
1828 	core_state->dumper.task = tsk;
1829 	core_state->dumper.next = NULL;
1830 
1831 	down_write(&mm->mmap_sem);
1832 	if (!mm->core_state)
1833 		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
1834 	up_write(&mm->mmap_sem);
1835 
1836 	if (unlikely(core_waiters < 0))
1837 		goto fail;
1838 
1839 	/*
1840 	 * Make sure nobody is waiting for us to release the VM,
1841 	 * otherwise we can deadlock when we wait on each other
1842 	 */
1843 	vfork_done = tsk->vfork_done;
1844 	if (vfork_done) {
1845 		tsk->vfork_done = NULL;
1846 		complete(vfork_done);
1847 	}
1848 
1849 	if (core_waiters)
1850 		wait_for_completion(&core_state->startup);
1851 fail:
1852 	return core_waiters;
1853 }
1854 
1855 static void coredump_finish(struct mm_struct *mm)
1856 {
1857 	struct core_thread *curr, *next;
1858 	struct task_struct *task;
1859 
1860 	next = mm->core_state->dumper.next;
1861 	while ((curr = next) != NULL) {
1862 		next = curr->next;
1863 		task = curr->task;
1864 		/*
1865 		 * see exit_mm(), curr->task must not see
1866 		 * ->task == NULL before we read ->next.
1867 		 */
1868 		smp_mb();
1869 		curr->task = NULL;
1870 		wake_up_process(task);
1871 	}
1872 
1873 	mm->core_state = NULL;
1874 }
1875 
1876 /*
1877  * set_dumpable converts traditional three-value dumpable to two flags and
1878  * stores them into mm->flags.  It modifies lower two bits of mm->flags, but
1879  * these bits are not changed atomically.  So get_dumpable can observe the
1880  * intermediate state.  To avoid unexpected behavior, get_dumpable returns
1881  * either the old dumpable or the new one by paying attention to the order of
1882  * modifying the bits.
1883  *
1884  * dumpable |   mm->flags (binary)
1885  * old  new | initial interim  final
1886  * ---------+-----------------------
1887  *  0    1  |   00      01      01
1888  *  0    2  |   00      10(*)   11
1889  *  1    0  |   01      00      00
1890  *  1    2  |   01      11      11
1891  *  2    0  |   11      10(*)   00
1892  *  2    1  |   11      11      01
1893  *
1894  * (*) get_dumpable regards interim value of 10 as 11.
1895  */
1896 void set_dumpable(struct mm_struct *mm, int value)
1897 {
1898 	switch (value) {
1899 	case 0:
1900 		clear_bit(MMF_DUMPABLE, &mm->flags);
1901 		smp_wmb();
1902 		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1903 		break;
1904 	case 1:
1905 		set_bit(MMF_DUMPABLE, &mm->flags);
1906 		smp_wmb();
1907 		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1908 		break;
1909 	case 2:
1910 		set_bit(MMF_DUMP_SECURELY, &mm->flags);
1911 		smp_wmb();
1912 		set_bit(MMF_DUMPABLE, &mm->flags);
1913 		break;
1914 	}
1915 }
1916 
1917 static int __get_dumpable(unsigned long mm_flags)
1918 {
1919 	int ret;
1920 
1921 	ret = mm_flags & MMF_DUMPABLE_MASK;
1922 	return (ret >= 2) ? 2 : ret;
1923 }
1924 
1925 int get_dumpable(struct mm_struct *mm)
1926 {
1927 	return __get_dumpable(mm->flags);
1928 }
1929 
1930 static void wait_for_dump_helpers(struct file *file)
1931 {
1932 	struct pipe_inode_info *pipe;
1933 
1934 	pipe = file->f_path.dentry->d_inode->i_pipe;
1935 
1936 	pipe_lock(pipe);
1937 	pipe->readers++;
1938 	pipe->writers--;
1939 
1940 	while ((pipe->readers > 1) && (!signal_pending(current))) {
1941 		wake_up_interruptible_sync(&pipe->wait);
1942 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1943 		pipe_wait(pipe);
1944 	}
1945 
1946 	pipe->readers--;
1947 	pipe->writers++;
1948 	pipe_unlock(pipe);
1949 
1950 }
1951 
1952 
1953 /*
1954  * umh_pipe_setup
1955  * helper function to customize the process used
1956  * to collect the core in userspace.  Specifically
1957  * it sets up a pipe and installs it as fd 0 (stdin)
1958  * for the process.  Returns 0 on success, or
1959  * PTR_ERR on failure.
1960  * Note that it also sets the core limit to 1.  This
1961  * is a special value that we use to trap recursive
1962  * core dumps
1963  */
1964 static int umh_pipe_setup(struct subprocess_info *info)
1965 {
1966 	struct file *rp, *wp;
1967 	struct fdtable *fdt;
1968 	struct coredump_params *cp = (struct coredump_params *)info->data;
1969 	struct files_struct *cf = current->files;
1970 
1971 	wp = create_write_pipe(0);
1972 	if (IS_ERR(wp))
1973 		return PTR_ERR(wp);
1974 
1975 	rp = create_read_pipe(wp, 0);
1976 	if (IS_ERR(rp)) {
1977 		free_write_pipe(wp);
1978 		return PTR_ERR(rp);
1979 	}
1980 
1981 	cp->file = wp;
1982 
1983 	sys_close(0);
1984 	fd_install(0, rp);
1985 	spin_lock(&cf->file_lock);
1986 	fdt = files_fdtable(cf);
1987 	FD_SET(0, fdt->open_fds);
1988 	FD_CLR(0, fdt->close_on_exec);
1989 	spin_unlock(&cf->file_lock);
1990 
1991 	/* and disallow core files too */
1992 	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
1993 
1994 	return 0;
1995 }
1996 
1997 void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1998 {
1999 	struct core_state core_state;
2000 	struct core_name cn;
2001 	struct mm_struct *mm = current->mm;
2002 	struct linux_binfmt *binfmt;
2003 	const struct cred *old_cred;
2004 	struct cred *cred;
2005 	int retval = 0;
2006 	int flag = 0;
2007 	int ispipe;
2008 	static atomic_t core_dump_count = ATOMIC_INIT(0);
2009 	struct coredump_params cprm = {
2010 		.signr = signr,
2011 		.regs = regs,
2012 		.limit = rlimit(RLIMIT_CORE),
2013 		/*
2014 		 * We must use the same mm->flags while dumping core to avoid
2015 		 * inconsistency of bit flags, since this flag is not protected
2016 		 * by any locks.
2017 		 */
2018 		.mm_flags = mm->flags,
2019 	};
2020 
2021 	audit_core_dumps(signr);
2022 
2023 	binfmt = mm->binfmt;
2024 	if (!binfmt || !binfmt->core_dump)
2025 		goto fail;
2026 	if (!__get_dumpable(cprm.mm_flags))
2027 		goto fail;
2028 
2029 	cred = prepare_creds();
2030 	if (!cred)
2031 		goto fail;
2032 	/*
2033 	 *	We cannot trust fsuid as being the "true" uid of the
2034 	 *	process nor do we know its entire history. We only know it
2035 	 *	was tainted so we dump it as root in mode 2.
2036 	 */
2037 	if (__get_dumpable(cprm.mm_flags) == 2) {
2038 		/* Setuid core dump mode */
2039 		flag = O_EXCL;		/* Stop rewrite attacks */
2040 		cred->fsuid = 0;	/* Dump root private */
2041 	}
2042 
2043 	retval = coredump_wait(exit_code, &core_state);
2044 	if (retval < 0)
2045 		goto fail_creds;
2046 
2047 	old_cred = override_creds(cred);
2048 
2049 	/*
2050 	 * Clear any false indication of pending signals that might
2051 	 * be seen by the filesystem code called to write the core file.
2052 	 */
2053 	clear_thread_flag(TIF_SIGPENDING);
2054 
2055 	ispipe = format_corename(&cn, signr);
2056 
2057 	if (ispipe == -ENOMEM) {
2058 		printk(KERN_WARNING "format_corename failed\n");
2059 		printk(KERN_WARNING "Aborting core\n");
2060 		goto fail_corename;
2061 	}
2062 
2063 	if (ispipe) {
2064 		int dump_count;
2065 		char **helper_argv;
2066 
2067 		if (cprm.limit == 1) {
2068 			/*
2069 			 * Normally core limits are irrelevant to pipes, since
2070 			 * we're not writing to the file system, but we use
2071 			 * cprm.limit of 1 here as a special value. Any
2072 			 * non-1 limit gets set to RLIM_INFINITY below, but
2073 			 * a limit of 0 skips the dump.  This is a consistent
2074 			 * way to catch recursive crashes.  We can still crash
2075 			 * if the core_pattern binary sets RLIMIT_CORE != 1,
2076 			 * but it runs as root and can do lots of stupid things.
2077 			 * Note that we use task_tgid_vnr here to grab the pid
2078 			 * of the process group leader.  That way we get the
2079 			 * right pid if a thread in a multi-threaded
2080 			 * core_pattern process dies.
2081 			 */
2082 			printk(KERN_WARNING
2083 				"Process %d(%s) has RLIMIT_CORE set to 1\n",
2084 				task_tgid_vnr(current), current->comm);
2085 			printk(KERN_WARNING "Aborting core\n");
2086 			goto fail_unlock;
2087 		}
2088 		cprm.limit = RLIM_INFINITY;
2089 
2090 		dump_count = atomic_inc_return(&core_dump_count);
2091 		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
2092 			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
2093 			       task_tgid_vnr(current), current->comm);
2094 			printk(KERN_WARNING "Skipping core dump\n");
2095 			goto fail_dropcount;
2096 		}
2097 
2098 		helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
2099 		if (!helper_argv) {
2100 			printk(KERN_WARNING "%s failed to allocate memory\n",
2101 			       __func__);
2102 			goto fail_dropcount;
2103 		}
2104 
2105 		retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
2106 					NULL, UMH_WAIT_EXEC, umh_pipe_setup,
2107 					NULL, &cprm);
2108 		argv_free(helper_argv);
2109 		if (retval) {
2110 			printk(KERN_INFO "Core dump to %s pipe failed\n",
2111 			       cn.corename);
2112 			goto close_fail;
2113 		}
2114 	} else {
2115 		struct inode *inode;
2116 
2117 		if (cprm.limit < binfmt->min_coredump)
2118 			goto fail_unlock;
2119 
2120 		cprm.file = filp_open(cn.corename,
2121 				 O_CREAT | O_RDWR | O_NOFOLLOW | O_LARGEFILE | flag,
2122 				 0600);
2123 		if (IS_ERR(cprm.file))
2124 			goto fail_unlock;
2125 
2126 		inode = cprm.file->f_path.dentry->d_inode;
2127 		if (inode->i_nlink > 1)
2128 			goto close_fail;
2129 		if (d_unhashed(cprm.file->f_path.dentry))
2130 			goto close_fail;
2131 		/*
2132 		 * AK: actually I see no reason not to allow this for named
2133 		 * pipes etc, but keep the previous behaviour for now.
2134 		 */
2135 		if (!S_ISREG(inode->i_mode))
2136 			goto close_fail;
2137 		/*
2138 		 * Don't allow local users to get cute and trick others into
2139 		 * coredumping into their pre-created files.
2140 		 */
2141 		if (inode->i_uid != current_fsuid())
2142 			goto close_fail;
2143 		if (!cprm.file->f_op || !cprm.file->f_op->write)
2144 			goto close_fail;
2145 		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
2146 			goto close_fail;
2147 	}
2148 
2149 	retval = binfmt->core_dump(&cprm);
2150 	if (retval)
2151 		current->signal->group_exit_code |= 0x80;
2152 
2153 	if (ispipe && core_pipe_limit)
2154 		wait_for_dump_helpers(cprm.file);
2155 close_fail:
2156 	if (cprm.file)
2157 		filp_close(cprm.file, NULL);
2158 fail_dropcount:
2159 	if (ispipe)
2160 		atomic_dec(&core_dump_count);
2161 fail_unlock:
2162 	kfree(cn.corename);
2163 fail_corename:
2164 	coredump_finish(mm);
2165 	revert_creds(old_cred);
2166 fail_creds:
2167 	put_cred(cred);
2168 fail:
2169 	return;
2170 }
2171 
2172 /*
2173  * Core dumping helper functions.  These are the only things you should
2174  * do on a core-file: use only these functions to write out all the
2175  * necessary info.
2176  */
2177 int dump_write(struct file *file, const void *addr, int nr)
2178 {
2179 	return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
2180 }
2181 EXPORT_SYMBOL(dump_write);
2182 
2183 int dump_seek(struct file *file, loff_t off)
2184 {
2185 	int ret = 1;
2186 
2187 	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
2188 		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
2189 			return 0;
2190 	} else {
2191 		char *buf = (char *)get_zeroed_page(GFP_KERNEL);
2192 
2193 		if (!buf)
2194 			return 0;
2195 		while (off > 0) {
2196 			unsigned long n = off;
2197 
2198 			if (n > PAGE_SIZE)
2199 				n = PAGE_SIZE;
2200 			if (!dump_write(file, buf, n)) {
2201 				ret = 0;
2202 				break;
2203 			}
2204 			off -= n;
2205 		}
2206 		free_page((unsigned long)buf);
2207 	}
2208 	return ret;
2209 }
2210 EXPORT_SYMBOL(dump_seek);
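
/*
 * Sketch of how a binfmt's ->core_dump method would consume the two
 * helpers above (example_core_dump is hypothetical): emit a fixed
 * header, then seek forward so the first segment starts page-aligned.
 * A real implementation (e.g. ELF) writes notes and memory segments.
 */
static int __maybe_unused example_core_dump(struct coredump_params *cprm)
{
	static const char hdr[] = "EXAMPLECORE";

	if (!dump_write(cprm->file, hdr, sizeof(hdr)))
		return 0;
	if (!dump_seek(cprm->file, PAGE_SIZE - sizeof(hdr)))
		return 0;
	return 1;	/* non-zero means a dump was produced */
}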
2211