fork.c (a1f817dc8ed79cdff2b76bc51d545fb9465982be) fork.c (b18b6a9cef7f30e9a8b7738d5fc8d568cf660855)
1/*
2 * linux/kernel/fork.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7/*
8 * 'fork.c' contains the help-routines for the 'fork' system call

--- 65 unchanged lines hidden (view full) ---

74#include <linux/uprobes.h>
75#include <linux/aio.h>
76#include <linux/compiler.h>
77#include <linux/sysctl.h>
78#include <linux/kcov.h>
79
80#include <asm/pgtable.h>
81#include <asm/pgalloc.h>
1/*
2 * linux/kernel/fork.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7/*
8 * 'fork.c' contains the help-routines for the 'fork' system call

--- 65 unchanged lines hidden (view full) ---

74#include <linux/uprobes.h>
75#include <linux/aio.h>
76#include <linux/compiler.h>
77#include <linux/sysctl.h>
78#include <linux/kcov.h>
79
80#include <asm/pgtable.h>
81#include <asm/pgalloc.h>
82#include <asm/uaccess.h>
82#include <linux/uaccess.h>
83#include <asm/mmu_context.h>
84#include <asm/cacheflush.h>
85#include <asm/tlbflush.h>
86
87#include <trace/events/sched.h>
88
89#define CREATE_TRACE_POINTS
90#include <trace/events/task.h>

--- 133 unchanged lines hidden (view full) ---

224 continue;
225
226 this_cpu_write(cached_stacks[i], tsk->stack_vm_area);
227 local_irq_restore(flags);
228 return;
229 }
230 local_irq_restore(flags);
231
83#include <asm/mmu_context.h>
84#include <asm/cacheflush.h>
85#include <asm/tlbflush.h>
86
87#include <trace/events/sched.h>
88
89#define CREATE_TRACE_POINTS
90#include <trace/events/task.h>

--- 133 unchanged lines hidden (view full) ---

224 continue;
225
226 this_cpu_write(cached_stacks[i], tsk->stack_vm_area);
227 local_irq_restore(flags);
228 return;
229 }
230 local_irq_restore(flags);
231
232 vfree(tsk->stack);
232 vfree_atomic(tsk->stack);
233 return;
234 }
235#endif
236
237 __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
238}
239# else
240static struct kmem_cache *thread_stack_cache;

--- 108 unchanged lines hidden (view full) ---

349 * by now.
350 */
351 WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
352#endif
353 rt_mutex_debug_task_free(tsk);
354 ftrace_graph_exit_task(tsk);
355 put_seccomp_filter(tsk);
356 arch_release_task_struct(tsk);
233 return;
234 }
235#endif
236
237 __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
238}
239# else
240static struct kmem_cache *thread_stack_cache;

--- 108 unchanged lines hidden (view full) ---

349 * by now.
350 */
351 WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
352#endif
353 rt_mutex_debug_task_free(tsk);
354 ftrace_graph_exit_task(tsk);
355 put_seccomp_filter(tsk);
356 arch_release_task_struct(tsk);
357 if (tsk->flags & PF_KTHREAD)
358 free_kthread_struct(tsk);
357 free_task_struct(tsk);
358}
359EXPORT_SYMBOL(free_task);
360
361static inline void free_signal_struct(struct signal_struct *sig)
362{
363 taskstats_tgid_free(sig);
364 sched_autogroup_exit(sig);

--- 375 unchanged lines hidden (view full) ---

740
/*
 * Record @p as the owning task of @mm for memory-cgroup accounting.
 * Compiles to a no-op when CONFIG_MEMCG is not enabled.
 */
static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}
747
359 free_task_struct(tsk);
360}
361EXPORT_SYMBOL(free_task);
362
363static inline void free_signal_struct(struct signal_struct *sig)
364{
365 taskstats_tgid_free(sig);
366 sched_autogroup_exit(sig);

--- 375 unchanged lines hidden (view full) ---

742
/*
 * Record @p as the owning task of @mm for memory-cgroup accounting.
 * Compiles to a no-op when CONFIG_MEMCG is not enabled.
 */
static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}
749
748static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
750static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
751 struct user_namespace *user_ns)
749{
750 mm->mmap = NULL;
751 mm->mm_rb = RB_ROOT;
752 mm->vmacache_seqnum = 0;
753 atomic_set(&mm->mm_users, 1);
754 atomic_set(&mm->mm_count, 1);
755 init_rwsem(&mm->mmap_sem);
756 INIT_LIST_HEAD(&mm->mmlist);

--- 23 unchanged lines hidden (view full) ---

780 }
781
782 if (mm_alloc_pgd(mm))
783 goto fail_nopgd;
784
785 if (init_new_context(p, mm))
786 goto fail_nocontext;
787
752{
753 mm->mmap = NULL;
754 mm->mm_rb = RB_ROOT;
755 mm->vmacache_seqnum = 0;
756 atomic_set(&mm->mm_users, 1);
757 atomic_set(&mm->mm_count, 1);
758 init_rwsem(&mm->mmap_sem);
759 INIT_LIST_HEAD(&mm->mmlist);

--- 23 unchanged lines hidden (view full) ---

783 }
784
785 if (mm_alloc_pgd(mm))
786 goto fail_nopgd;
787
788 if (init_new_context(p, mm))
789 goto fail_nocontext;
790
791 mm->user_ns = get_user_ns(user_ns);
788 return mm;
789
790fail_nocontext:
791 mm_free_pgd(mm);
792fail_nopgd:
793 free_mm(mm);
794 return NULL;
795}

--- 29 unchanged lines hidden (view full) ---

825{
826 struct mm_struct *mm;
827
828 mm = allocate_mm();
829 if (!mm)
830 return NULL;
831
832 memset(mm, 0, sizeof(*mm));
792 return mm;
793
794fail_nocontext:
795 mm_free_pgd(mm);
796fail_nopgd:
797 free_mm(mm);
798 return NULL;
799}

--- 29 unchanged lines hidden (view full) ---

829{
830 struct mm_struct *mm;
831
832 mm = allocate_mm();
833 if (!mm)
834 return NULL;
835
836 memset(mm, 0, sizeof(*mm));
833 return mm_init(mm, current);
837 return mm_init(mm, current, current_user_ns());
834}
835
836/*
837 * Called when the last reference to the mm
838 * is dropped: either by a lazy thread or by
839 * mmput. Free the page directory and the mm.
840 */
841void __mmdrop(struct mm_struct *mm)
842{
843 BUG_ON(mm == &init_mm);
844 mm_free_pgd(mm);
845 destroy_context(mm);
846 mmu_notifier_mm_destroy(mm);
847 check_mm(mm);
838}
839
840/*
841 * Called when the last reference to the mm
842 * is dropped: either by a lazy thread or by
843 * mmput. Free the page directory and the mm.
844 */
845void __mmdrop(struct mm_struct *mm)
846{
847 BUG_ON(mm == &init_mm);
848 mm_free_pgd(mm);
849 destroy_context(mm);
850 mmu_notifier_mm_destroy(mm);
851 check_mm(mm);
852 put_user_ns(mm->user_ns);
848 free_mm(mm);
849}
850EXPORT_SYMBOL_GPL(__mmdrop);
851
852static inline void __mmput(struct mm_struct *mm)
853{
854 VM_BUG_ON(atomic_read(&mm->mm_users));
855

--- 265 unchanged lines hidden (view full) ---

1121 int err;
1122
1123 mm = allocate_mm();
1124 if (!mm)
1125 goto fail_nomem;
1126
1127 memcpy(mm, oldmm, sizeof(*mm));
1128
853 free_mm(mm);
854}
855EXPORT_SYMBOL_GPL(__mmdrop);
856
857static inline void __mmput(struct mm_struct *mm)
858{
859 VM_BUG_ON(atomic_read(&mm->mm_users));
860

--- 265 unchanged lines hidden (view full) ---

1126 int err;
1127
1128 mm = allocate_mm();
1129 if (!mm)
1130 goto fail_nomem;
1131
1132 memcpy(mm, oldmm, sizeof(*mm));
1133
1129 if (!mm_init(mm, tsk))
1134 if (!mm_init(mm, tsk, mm->user_ns))
1130 goto fail_nomem;
1131
1132 err = dup_mmap(mm, oldmm);
1133 if (err)
1134 goto free_pt;
1135
1136 mm->hiwater_rss = get_mm_rss(mm);
1137 mm->hiwater_vm = mm->total_vm;

--- 156 unchanged lines hidden (view full) ---

1294 /*
1295 * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
1296 * without an RCU grace period, see __lock_task_sighand().
1297 */
1298 kmem_cache_free(sighand_cachep, sighand);
1299 }
1300}
1301
1135 goto fail_nomem;
1136
1137 err = dup_mmap(mm, oldmm);
1138 if (err)
1139 goto free_pt;
1140
1141 mm->hiwater_rss = get_mm_rss(mm);
1142 mm->hiwater_vm = mm->total_vm;

--- 156 unchanged lines hidden (view full) ---

1299 /*
1300 * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
1301 * without an RCU grace period, see __lock_task_sighand().
1302 */
1303 kmem_cache_free(sighand_cachep, sighand);
1304 }
1305}
1306
#ifdef CONFIG_POSIX_TIMERS
/*
 * Initialize POSIX timer handling for a thread group.
 *
 * If RLIMIT_CPU is finite, arm the process-wide profiling expiry and
 * mark the group cputimer running so CPU-time accounting starts now.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	unsigned long cpu_limit;

	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (cpu_limit != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
		sig->cputimer.running = true;
	}

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
#else
static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
#endif
1320
1321static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1322{
1323 struct signal_struct *sig;
1324
1325 if (clone_flags & CLONE_THREAD)
1326 return 0;
1327

--- 8 unchanged lines hidden (view full) ---

1336
1337 /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1338 sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1339 tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1340
1341 init_waitqueue_head(&sig->wait_chldexit);
1342 sig->curr_target = tsk;
1343 init_sigpending(&sig->shared_pending);
1329
1330static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1331{
1332 struct signal_struct *sig;
1333
1334 if (clone_flags & CLONE_THREAD)
1335 return 0;
1336

--- 8 unchanged lines hidden (view full) ---

1345
1346 /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1347 sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1348 tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1349
1350 init_waitqueue_head(&sig->wait_chldexit);
1351 sig->curr_target = tsk;
1352 init_sigpending(&sig->shared_pending);
1344 INIT_LIST_HEAD(&sig->posix_timers);
1345 seqlock_init(&sig->stats_lock);
1346 prev_cputime_init(&sig->prev_cputime);
1347
1353 seqlock_init(&sig->stats_lock);
1354 prev_cputime_init(&sig->prev_cputime);
1355
1356#ifdef CONFIG_POSIX_TIMERS
1357 INIT_LIST_HEAD(&sig->posix_timers);
1348 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1349 sig->real_timer.function = it_real_fn;
1358 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1359 sig->real_timer.function = it_real_fn;
1360#endif
1350
1351 task_lock(current->group_leader);
1352 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1353 task_unlock(current->group_leader);
1354
1355 posix_cpu_timers_init_group(sig);
1356
1357 tty_audit_fork(sig);

--- 55 unchanged lines hidden (view full) ---

1413 raw_spin_lock_init(&p->pi_lock);
1414#ifdef CONFIG_RT_MUTEXES
1415 p->pi_waiters = RB_ROOT;
1416 p->pi_waiters_leftmost = NULL;
1417 p->pi_blocked_on = NULL;
1418#endif
1419}
1420
1361
1362 task_lock(current->group_leader);
1363 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1364 task_unlock(current->group_leader);
1365
1366 posix_cpu_timers_init_group(sig);
1367
1368 tty_audit_fork(sig);

--- 55 unchanged lines hidden (view full) ---

1424 raw_spin_lock_init(&p->pi_lock);
1425#ifdef CONFIG_RT_MUTEXES
1426 p->pi_waiters = RB_ROOT;
1427 p->pi_waiters_leftmost = NULL;
1428 p->pi_blocked_on = NULL;
1429#endif
1430}
1431
#ifdef CONFIG_POSIX_TIMERS
/*
 * Initialize POSIX timer handling for a single task: clear all three
 * per-task expiry fields and init the corresponding timer lists.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = 0;
	tsk->cputime_expires.virt_exp = 0;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}
#else
static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
#endif
1433
1434static inline void
1435init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1436{
1437 task->pids[type].pid = pid;
1438}
1439
1440/*

--- 94 unchanged lines hidden (view full) ---

1535 * triggers too late. This doesn't hurt, the check is only there
1536 * to stop root fork bombs.
1537 */
1538 retval = -EAGAIN;
1539 if (nr_threads >= max_threads)
1540 goto bad_fork_cleanup_count;
1541
1542 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
1448
1449static inline void
1450init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1451{
1452 task->pids[type].pid = pid;
1453}
1454
1455/*

--- 94 unchanged lines hidden (view full) ---

1550 * triggers too late. This doesn't hurt, the check is only there
1551 * to stop root fork bombs.
1552 */
1553 retval = -EAGAIN;
1554 if (nr_threads >= max_threads)
1555 goto bad_fork_cleanup_count;
1556
1557 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
1543 p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
1558 p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
1544 p->flags |= PF_FORKNOEXEC;
1545 INIT_LIST_HEAD(&p->children);
1546 INIT_LIST_HEAD(&p->sibling);
1547 rcu_copy_process(p);
1548 p->vfork_done = NULL;
1549 spin_lock_init(&p->alloc_lock);
1550
1551 init_sigpending(&p->pending);
1552
1553 p->utime = p->stime = p->gtime = 0;
1559 p->flags |= PF_FORKNOEXEC;
1560 INIT_LIST_HEAD(&p->children);
1561 INIT_LIST_HEAD(&p->sibling);
1562 rcu_copy_process(p);
1563 p->vfork_done = NULL;
1564 spin_lock_init(&p->alloc_lock);
1565
1566 init_sigpending(&p->pending);
1567
1568 p->utime = p->stime = p->gtime = 0;
1569#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1554 p->utimescaled = p->stimescaled = 0;
1570 p->utimescaled = p->stimescaled = 0;
1571#endif
1555 prev_cputime_init(&p->prev_cputime);
1556
1557#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1558 seqcount_init(&p->vtime_seqcount);
1559 p->vtime_snap = 0;
1560 p->vtime_snap_whence = VTIME_INACTIVE;
1561#endif
1562

--- 769 unchanged lines hidden ---
1572 prev_cputime_init(&p->prev_cputime);
1573
1574#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1575 seqcount_init(&p->vtime_seqcount);
1576 p->vtime_snap = 0;
1577 p->vtime_snap_whence = VTIME_INACTIVE;
1578#endif
1579

--- 769 unchanged lines hidden ---