exec.c: diff of 44b56603c4c476b845a824cff6fe905c6268b2a1 (old) → 3af9e859281bda7eb7c20b51879cf43aa788ac2e (new)
/*
 * linux/fs/exec.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.

--- 228 unchanged lines hidden ---

	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
+	BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
-	vma->vm_flags = VM_STACK_FLAGS;
+	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);

--- 355 unchanged lines hidden ---

	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
+	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

+	/* mprotect_fixup is overkill to remove the temporary stack flags */
+	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+
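The new VM_STACK_INCOMPLETE_SETUP flag brackets the window in which exec is still assembling the stack VMA: bprm_mm_init() creates the VMA with the flag set, and setup_arg_pages() strips it again once the stack has been shifted and re-protected, so that other mm paths can recognize and refuse a half-built stack. A minimal userspace sketch of the same transient-flag pattern follows; the bit values and names are illustrative, not the kernel's:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN		0x0100
#define VM_INCOMPLETE_SETUP	0x0200	/* transient: still being assembled */

struct region { unsigned long flags; };

/* Operations on the region must bail while setup is in flight. */
static bool region_lockable(const struct region *r)
{
	return !(r->flags & VM_INCOMPLETE_SETUP);
}

int main(void)
{
	struct region stack = { .flags = VM_GROWSDOWN | VM_INCOMPLETE_SETUP };

	assert(!region_lockable(&stack));	/* mid-setup: refuse */

	stack.flags &= ~VM_INCOMPLETE_SETUP;	/* setup finished */
	assert(region_lockable(&stack));	/* now fair game */

	puts("transient setup flag cleared; region usable");
	return 0;
}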
	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
+	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
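For reference, the guarantee computed above: the VMA is grown so that at least stack_expand (128 KiB) of headroom exists beyond the bytes already used, clamped to RLIMIT_STACK rounded down to a page boundary. A worked grows-down example with made-up addresses:

#include <stdio.h>

#define PAGE_MASK (~4095UL)	/* 4 KiB pages assumed for illustration */

int main(void)
{
	unsigned long vm_start = 0xbffe0000UL;	/* hypothetical stack VMA */
	unsigned long vm_end   = 0xc0000000UL;
	unsigned long stack_expand = 131072UL;	/* 32*4k pages, as in the diff */
	unsigned long rlim = 8UL << 20;		/* RLIMIT_STACK = 8 MiB */
	unsigned long rlim_stack = rlim & PAGE_MASK;
	unsigned long stack_size = vm_end - vm_start;
	unsigned long stack_base;

	/* the grows-down branch from the diff */
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vm_end - rlim_stack;	/* clamp to the rlimit */
	else
		stack_base = vm_start - stack_expand;	/* 128 KiB headroom */

	printf("stack_size=%#lx, expand to base=%#lx (%lu KiB below vm_start)\n",
	       stack_size, stack_base, (vm_start - stack_base) >> 10);
	return 0;
}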

--- 99 unchanged lines hidden ---

 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
-	int count;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}
+
	sig->group_exit_task = tsk;
-	zap_other_threads(tsk);
+	sig->notify_count = zap_other_threads(tsk);
+	if (!thread_group_leader(tsk))
+		sig->notify_count--;

-	/* Account for the thread group leader hanging around: */
-	count = thread_group_leader(tsk) ? 1 : 2;
-	sig->notify_count = count;
-	while (atomic_read(&sig->count) > count) {
+	while (sig->notify_count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);
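The rework replaces polling of atomic sig->count against a computed threshold with an explicit countdown: zap_other_threads() now returns how many threads still have to go away, the exec'ing thread parks on sig->notify_count, and each exiting thread decrements it and wakes the waiter. A pthreads sketch of the same countdown-and-wake shape, with a condition variable standing in for the siglock/schedule() dance (all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
static int notify_count;

/* An "exiting thread": decrements the count and wakes the waiter. */
static void *thread_exit_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (--notify_count == 0)
		pthread_cond_signal(&cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tids[3];
	int i;

	notify_count = 3;	/* what zap_other_threads() would report */
	for (i = 0; i < 3; i++)
		pthread_create(&tids[i], NULL, thread_exit_path, NULL);

	/* de_thread()'s wait loop, minus the kernel locking details */
	pthread_mutex_lock(&lock);
	while (notify_count)
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 3; i++)
		pthread_join(tids[i], NULL);
	puts("all other threads gone; safe to take over the thread group");
	return 0;
}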

	/*

--- 854 unchanged lines hidden ---

	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion *vfork_done;
-	int core_waiters;
+	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;
-	core_waiters = zap_threads(tsk, mm, core_state, exit_code);
+
+	down_write(&mm->mmap_sem);
+	if (!mm->core_state)
+		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

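coredump_wait() now starts core_waiters at -EBUSY and only calls zap_threads() if mm->core_state is still unclaimed, with the test made under mmap_sem, so two crashing threads can no longer both start a dump. A compact userspace sketch of that claim-under-lock shape (hypothetical names, pthread mutex standing in for mmap_sem):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;
static void *core_state;		/* NULL until a dumper claims it */

static int coredump_claim(void *me)
{
	int ret = -EBUSY;		/* assume someone beat us to it */

	pthread_mutex_lock(&mmap_sem);
	if (!core_state) {
		core_state = me;	/* we are the one dumper */
		ret = 0;
	}
	pthread_mutex_unlock(&mmap_sem);
	return ret;
}

int main(void)
{
	int a, b;

	a = coredump_claim(&a);
	b = coredump_claim(&b);		/* loses the race, gets -EBUSY */
	printf("first=%d second=%d\n", a, b);
	return 0;
}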
	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other

--- 103 unchanged lines hidden ---

	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);

}

+/*
+ * umh_pipe_setup
+ * helper function to customize the process used
+ * to collect the core in userspace.  Specifically
+ * it sets up a pipe and installs it as fd 0 (stdin)
+ * for the process.  Returns 0 on success, or
+ * PTR_ERR on failure.
+ * Note that it also sets the core limit to 1.  This
+ * is a special value that we use to trap recursive
+ * core dumps
+ */
+static int umh_pipe_setup(struct subprocess_info *info)
+{
+	struct file *rp, *wp;
+	struct fdtable *fdt;
+	struct coredump_params *cp = (struct coredump_params *)info->data;
+	struct files_struct *cf = current->files;
+
+	wp = create_write_pipe(0);
+	if (IS_ERR(wp))
+		return PTR_ERR(wp);
+
+	rp = create_read_pipe(wp, 0);
+	if (IS_ERR(rp)) {
+		free_write_pipe(wp);
+		return PTR_ERR(rp);
+	}
+
+	cp->file = wp;
+
+	sys_close(0);
+	fd_install(0, rp);
+	spin_lock(&cf->file_lock);
+	fdt = files_fdtable(cf);
+	FD_SET(0, fdt->open_fds);
+	FD_CLR(0, fdt->close_on_exec);
+	spin_unlock(&cf->file_lock);
+
+	/* and disallow core files too */
+	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
+
+	return 0;
+}
+
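umh_pipe_setup() runs in the usermode-helper child before exec: it builds a pipe, hands the write end back to the dumper through cprm.file, and installs the read end as the helper's fd 0. The in-kernel create_write_pipe()/fd_install() machinery has no userspace twin, but the fd plumbing is the ordinary pipe-to-child-stdin dance, sketched here ("wc -c" plays the core_pattern helper):

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	pid_t pid;

	if (pipe(fds) < 0)
		return 1;

	pid = fork();
	if (pid == 0) {				/* "core_pattern helper" */
		close(fds[1]);			/* keep only the read side */
		dup2(fds[0], 0);		/* install it as stdin */
		close(fds[0]);
		execlp("wc", "wc", "-c", (char *)NULL);
		_exit(127);
	}

	close(fds[0]);				/* dumper keeps the write side */
	if (write(fds[1], "fake core data\n", 15) != 15)
		perror("write");		/* binfmt->core_dump stand-in */
	close(fds[1]);				/* EOF lets the helper finish */
	waitpid(pid, NULL, 0);
	return 0;
}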
void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
	struct core_state core_state;
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
-	struct inode * inode;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
-	int ispipe = 0;
-	char **helper_argv = NULL;
-	int helper_argc = 0;
-	int dump_count = 0;
+	int ispipe;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.signr = signr,
		.regs = regs,
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(signr);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
+	if (!__get_dumpable(cprm.mm_flags))
+		goto fail;

	cred = prepare_creds();
-	if (!cred) {
-		retval = -ENOMEM;
+	if (!cred)
		goto fail;
-	}
-
-	down_write(&mm->mmap_sem);
-	/*
-	 * If another thread got here first, or we are not dumpable, bail out.
-	 */
-	if (mm->core_state || !__get_dumpable(cprm.mm_flags)) {
-		up_write(&mm->mmap_sem);
-		put_cred(cred);
-		goto fail;
-	}
-
	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (__get_dumpable(cprm.mm_flags) == 2) {
		/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = 0;	/* Dump root private */
	}

	retval = coredump_wait(exit_code, &core_state);
-	if (retval < 0) {
-		put_cred(cred);
-		goto fail;
-	}
+	if (retval < 0)
+		goto fail_creds;

	old_cred = override_creds(cred);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	ispipe = format_corename(corename, signr);
	unlock_kernel();

-	if ((!ispipe) && (cprm.limit < binfmt->min_coredump))
-		goto fail_unlock;
-
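format_corename() expands core_pattern and reports whether it designates a pipe: by convention a pattern beginning with '|' means "pipe the dump into this program", which is why the pipe branch below hands corename+1 (skipping the '|') to argv_split(). A toy userspace version of the detect-and-split step; strtok stands in for the kernel's argv_split, and the % specifiers, which format_corename() would already have expanded, are left literal for brevity:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char corename[] = "|/usr/lib/core-helper -p %p";	/* example pattern */
	int ispipe = (corename[0] == '|');
	char *argvec[16];
	int argc = 0;

	if (ispipe)	/* split everything after the '|' into an argv */
		for (char *tok = strtok(corename + 1, " ");
		     tok && argc < 15; tok = strtok(NULL, " "))
			argvec[argc++] = tok;
	argvec[argc] = NULL;		/* execv-style terminator */

	printf("ispipe=%d helper=%s argc=%d\n", ispipe,
	       argc ? argvec[0] : "(none)", argc);
	return 0;
}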
	if (ispipe) {
+		int dump_count;
+		char **helper_argv;
+
-		if (cprm.limit == 0) {
+		if (cprm.limit == 1) {
			/*
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
-			 * cprm.limit of 0 here as a special value. Any
-			 * non-zero limit gets set to RLIM_INFINITY below, but
+			 * cprm.limit of 1 here as a special value. Any
+			 * non-1 limit gets set to RLIM_INFINITY below, but
			 * a limit of 0 skips the dump. This is a consistent
			 * way to catch recursive crashes. We can still crash
-			 * if the core_pattern binary sets RLIM_CORE = !0
+			 * if the core_pattern binary sets RLIM_CORE = !1
			 * but it runs as root, and can do lots of stupid things
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
-				"Process %d(%s) has RLIMIT_CORE set to 0\n",
+				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
+		cprm.limit = RLIM_INFINITY;
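The recursion trap changes value: umh_pipe_setup() pins the helper's RLIMIT_CORE to {1, 1}, so if the helper itself crashes, do_coredump() sees limit == 1 and aborts instead of spawning helpers forever, while a limit of 0 keeps its usual meaning of "no dump at all". From userspace the same limit is an ordinary rlimit, as this small demo shows:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { 1, 1 };	/* the "trap recursive dumps" value */

	if (setrlimit(RLIMIT_CORE, &rl))
		return 1;
	getrlimit(RLIMIT_CORE, &rl);
	printf("RLIMIT_CORE soft=%llu hard=%llu\n",
	       (unsigned long long)rl.rlim_cur,
	       (unsigned long long)rl.rlim_max);
	return 0;
}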

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

-		helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
+		helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

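core_pipe_limit caps how many pipe helpers may run at once: each dump bumps a global counter with atomic_inc_return() and bails if the cap is exceeded. The same inc-then-check can be written with C11 atomics; a sketch, with the sysctl faked as a plain int:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int core_dump_count = 0;
static int core_pipe_limit = 2;		/* sysctl stand-in */

/* Returns 1 if this dump may proceed, mirroring the check above. */
static int may_dump(void)
{
	/* atomic_fetch_add returns the old value; +1 = atomic_inc_return */
	int dump_count = atomic_fetch_add(&core_dump_count, 1) + 1;

	return !(core_pipe_limit && core_pipe_limit < dump_count);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("dump %d: %s\n", i, may_dump() ? "ok" : "skipped");
	return 0;
}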
-		cprm.limit = RLIM_INFINITY;
-
-		/* SIGPIPE can happen, but it's just never processed */
-		if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
-				&cprm.file)) {
+		retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
+					NULL, UMH_WAIT_EXEC, umh_pipe_setup,
+					NULL, &cprm);
+		argv_free(helper_argv);
+		if (retval) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
-			goto fail_dropcount;
+			goto close_fail;
		}
-	} else
+	} else {
+		struct inode *inode;
+
+		if (cprm.limit < binfmt->min_coredump)
+			goto fail_unlock;
+
		cprm.file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
		if (IS_ERR(cprm.file))
-			goto fail_dropcount;
-		inode = cprm.file->f_path.dentry->d_inode;
-		if (inode->i_nlink > 1)
-			goto close_fail;	/* multiple links - don't dump */
-		if (!ispipe && d_unhashed(cprm.file->f_path.dentry))
-			goto close_fail;
+			goto fail_unlock;

-		/* AK: actually i see no reason to not allow this for named pipes etc.,
-		   but keep the previous behaviour for now. */
-		if (!ispipe && !S_ISREG(inode->i_mode))
-			goto close_fail;
-		/*
-		 * Don't allow local users get cute and trick others to coredump
-		 * into their pre-created files:
-		 * Note, this is not relevant for pipes
-		 */
-		if (!ispipe && (inode->i_uid != current_fsuid()))
-			goto close_fail;
-		if (!cprm.file->f_op)
-			goto close_fail;
-		if (!cprm.file->f_op->write)
-			goto close_fail;
-		if (!ispipe &&
-		    do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0)
-			goto close_fail;
+		inode = cprm.file->f_path.dentry->d_inode;
+		if (inode->i_nlink > 1)
+			goto close_fail;
+		if (d_unhashed(cprm.file->f_path.dentry))
+			goto close_fail;
+		/*
+		 * AK: actually i see no reason to not allow this for named
+		 * pipes etc, but keep the previous behaviour for now.
+		 */
+		if (!S_ISREG(inode->i_mode))
+			goto close_fail;
+		/*
+		 * Don't allow local users get cute and trick others to coredump
+		 * into their pre-created files.
+		 */
+		if (inode->i_uid != current_fsuid())
+			goto close_fail;
+		if (!cprm.file->f_op || !cprm.file->f_op->write)
+			goto close_fail;
+		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
+			goto close_fail;
+	}
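The file branch keeps the old paranoia but drops the now-useless !ispipe guards: no multi-link targets, no unhashed dentries, regular files only, and the file must belong to the dumping fsuid. A userspace approximation of that open-then-verify dance using O_NOFOLLOW plus fstat() (d_unhashed() has no userspace equivalent, so it is omitted):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *corename = "core";		/* example target */
	struct stat st;
	int fd = open(corename, O_CREAT | O_WRONLY | O_NOFOLLOW, 0600);

	if (fd < 0 || fstat(fd, &st))
		return 1;
	if (st.st_nlink > 1 ||			/* multiple links: don't dump */
	    !S_ISREG(st.st_mode) ||		/* regular files only */
	    st.st_uid != geteuid()) {		/* not our file: don't dump */
		close(fd);
		return 1;
	}
	if (ftruncate(fd, 0))			/* the do_truncate() step */
		perror("ftruncate");
	puts("target vetted; would write core here");
	close(fd);
	return 0;
}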

	retval = binfmt->core_dump(&cprm);
	if (retval)
		current->signal->group_exit_code |= 0x80;
-close_fail:
+
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
-	filp_close(cprm.file, NULL);
+close_fail:
+	if (cprm.file)
+		filp_close(cprm.file, NULL);
fail_dropcount:
-	if (dump_count)
+	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
-	if (helper_argv)
-		argv_free(helper_argv);
-
+	coredump_finish(mm);
	revert_creds(old_cred);
+fail_creds:
	put_cred(cred);
-	coredump_finish(mm);
fail:
	return;
}
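The exit path now unwinds in strict reverse order of setup, one label per resource: close_fail drops the file, fail_dropcount the pipe counter, fail_unlock runs coredump_finish() and reverts the override creds, and the new fail_creds releases the reference taken by prepare_creds(). The idiom in miniature (illustrative only):

#include <stdio.h>
#include <stdlib.h>

static int do_work(void)
{
	char *a, *b;
	int ret = -1;

	a = malloc(16);
	if (!a)
		goto fail;
	b = malloc(16);
	if (!b)
		goto fail_a;		/* unwind only what exists so far */

	puts("work done");
	ret = 0;

	free(b);
fail_a:
	free(a);
fail:
	return ret;
}

int main(void)
{
	return do_work() ? EXIT_FAILURE : EXIT_SUCCESS;
}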