// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/kmsan.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/tty.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>
#include <linux/stackprotector.h>
#include <linux/user_events.h>
#include <linux/iommu.h>
#include <linux/tick.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

static int max_threads;		/* tunable limit on nr_threads */

#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

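/*
 * Weak stub: architectures that allocate extra state alongside the
 * task_struct can override this to release it.
 */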
void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

# ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

struct vm_stack {
	struct rcu_head rcu;
	struct vm_struct *stack_vm_area;
};

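/*
 * Try to park a vmapped stack in one of the per-CPU cache slots. Returns
 * true if a free slot was claimed; false means the cache is full and the
 * caller must free the stack itself.
 */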
static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
{
	unsigned int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
			continue;
		return true;
	}
	return false;
}

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);

	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
		return;

	vfree(vm_stack);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct vm_stack *vm_stack = tsk->stack;

	vm_stack->stack_vm_area = tsk->stack_vm_area;
	call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
}

static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}

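/*
 * Charge the pages backing a vmapped stack to the current memory cgroup,
 * one page at a time. On failure, uncharge whatever was already charged
 * so the caller can simply free the stack.
 */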
static int memcg_charge_kernel_stack(struct vm_struct *vm)
{
	int i;
	int ret;
	int nr_charged = 0;

	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
		if (ret)
			goto err;
		nr_charged++;
	}
	return 0;
err:
	for (i = 0; i < nr_charged; i++)
		memcg_kmem_uncharge_page(vm->pages[i], 0);
	return ret;
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct vm_struct *vm;
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;

		/* Reset stack metadata. */
		kasan_unpoison_range(s->addr, THREAD_SIZE);

		stack = kasan_reset_tag(s->addr);

		/* Clear stale pointers from reused stack. */
		memset(stack, 0, THREAD_SIZE);

		if (memcg_charge_kernel_stack(s)) {
			vfree(s->addr);
			return -ENOMEM;
		}

		tsk->stack_vm_area = s;
		tsk->stack = stack;
		return 0;
	}

	/*
	 * Allocated stacks are cached and later reused by new threads,
	 * so memcg accounting is performed manually on assigning/releasing
	 * stacks to tasks. Drop __GFP_ACCOUNT.
	 */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));
	if (!stack)
		return -ENOMEM;

	vm = find_vm_area(stack);
	if (memcg_charge_kernel_stack(vm)) {
		vfree(stack);
		return -ENOMEM;
	}
	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	tsk->stack_vm_area = vm;
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return 0;
}

static void free_thread_stack(struct task_struct *tsk)
{
	if (!try_release_thread_stack_to_cache(tsk->stack_vm_area))
		thread_stack_delayed_free(tsk);

	tsk->stack = NULL;
	tsk->stack_vm_area = NULL;
}

# else /* !CONFIG_VMAP_STACK */

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	__free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		tsk->stack = kasan_reset_tag(page_address(page));
		return 0;
	}
	return -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

# endif /* CONFIG_VMAP_STACK */
# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */

static struct kmem_cache *thread_stack_cache;

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	kmem_cache_free(thread_stack_cache, rh);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;
	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
					THREAD_SIZE, THREAD_SIZE, 0, 0,
					THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}

# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;

	stack = arch_alloc_thread_stack_node(tsk, node);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	arch_free_thread_stack(tsk);
	tsk->stack = NULL;
}

#endif /* !CONFIG_ARCH_THREAD_STACK_ALLOCATOR */

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

#ifdef CONFIG_PER_VMA_LOCK

/* SLAB cache for vm_area_struct.lock */
static struct kmem_cache *vma_lock_cachep;

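/*
 * Allocate and initialize the rw_semaphore used for per-VMA locking.
 * Returns false on allocation failure, in which case the caller is
 * expected to free the VMA.
 */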
static bool vma_lock_alloc(struct vm_area_struct *vma)
{
	vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL);
	if (!vma->vm_lock)
		return false;

	init_rwsem(&vma->vm_lock->lock);
	vma->vm_lock_seq = -1;

	return true;
}

static inline void vma_lock_free(struct vm_area_struct *vma)
{
	kmem_cache_free(vma_lock_cachep, vma->vm_lock);
}

#else /* CONFIG_PER_VMA_LOCK */

static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; }
static inline void vma_lock_free(struct vm_area_struct *vma) {}

#endif /* CONFIG_PER_VMA_LOCK */

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return NULL;

	vma_init(vma, mm);
	if (!vma_lock_alloc(vma)) {
		kmem_cache_free(vm_area_cachep, vma);
		return NULL;
	}

	return vma;
}

struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (!new)
		return NULL;

	ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
	ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
	/*
	 * orig->shared.rb may be modified concurrently, but the clone
	 * will be reinitialized.
	 */
	data_race(memcpy(new, orig, sizeof(*new)));
	if (!vma_lock_alloc(new)) {
		kmem_cache_free(vm_area_cachep, new);
		return NULL;
	}
	INIT_LIST_HEAD(&new->anon_vma_chain);
	vma_numab_state_init(new);
	dup_anon_vma_name(orig, new);

	return new;
}

void __vm_area_free(struct vm_area_struct *vma)
{
	vma_numab_state_free(vma);
	free_anon_vma_name(vma);
	vma_lock_free(vma);
	kmem_cache_free(vm_area_cachep, vma);
}

#ifdef CONFIG_PER_VMA_LOCK
static void vm_area_free_rcu_cb(struct rcu_head *head)
{
	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
						  vm_rcu);

	/* The vma should not be locked while being destroyed. */
	VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma);
	__vm_area_free(vma);
}
#endif

void vm_area_free(struct vm_area_struct *vma)
{
#ifdef CONFIG_PER_VMA_LOCK
	call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb);
#else
	__vm_area_free(vma);
#endif
}

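/*
 * Account the kernel stack of @tsk in the NR_KERNEL_STACK_KB counters.
 * @account is +1 when the stack is being set up and -1 when it is torn
 * down; vmapped stacks are accounted per page, other stacks as a single
 * contiguous allocation.
 */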
static void account_kernel_stack(struct task_struct *tsk, int account)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm = task_stack_vm_area(tsk);
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
					      account * (PAGE_SIZE / 1024));
	} else {
		void *stack = task_stack_page(tsk);

		/* All stack pages are in the same node. */
		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	}
}

void exit_task_stack_account(struct task_struct *tsk)
{
	account_kernel_stack(tsk, -1);

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm;
		int i;

		vm = task_stack_vm_area(tsk);
		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			memcg_kmem_uncharge_page(vm->pages[i], 0);
	}
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	free_thread_stack(tsk);
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
#ifdef CONFIG_SECCOMP
	WARN_ON_ONCE(tsk->seccomp.filter);
#endif
	release_user_cpus_ptr(tsk);
	scs_release(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	bpf_task_storage_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

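/*
 * Point the new mm at the parent's exe_file and take an additional
 * write-deny reference on it.
 */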
static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct file *exe_file;

	exe_file = get_mm_exe_file(oldmm);
	RCU_INIT_POINTER(mm->exe_file, exe_file);
	/*
	 * We depend on the oldmm having properly denied write access to the
	 * exe_file already.
	 */
	if (exe_file && deny_write_access(exe_file))
		pr_warn_once("deny_write_access() failed in %s\n", __func__);
}

#ifdef CONFIG_MMU
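/*
 * Duplicate the parent's address space for fork(): copy each parent VMA
 * into the child's VMA tree (skipping VM_DONTCOPY mappings) and copy the
 * page tables where required.
 */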
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp;
	int retval;
	unsigned long charge = 0;
	LIST_HEAD(uf);
	VMA_ITERATOR(old_vmi, oldmm, 0);
	VMA_ITERATOR(vmi, mm, 0);

	uprobe_start_dup_mmap();
	if (mmap_write_lock_killable(oldmm)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	dup_mm_exe_file(mm, oldmm);

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	khugepaged_fork(mm, oldmm);

	retval = vma_iter_bulk_alloc(&vmi, oldmm->map_count);
	if (retval)
		goto out;

	mt_clear_in_rcu(vmi.mas.tree);
	for_each_vma(old_vmi, mpnt) {
		struct file *file;

		vma_start_write(mpnt);
		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto loop_out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		vm_flags_clear(tmp, VM_LOCKED_MASK);
		file = tmp->vm_file;
		if (file) {
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Copy/update hugetlb private vma information.
		 */
		if (is_vm_hugetlb_page(tmp))
			hugetlb_dup_vma_private(tmp);

		/* Link the vma into the MT */
		if (vma_iter_bulk_store(&vmi, tmp))
			goto fail_nomem_vmi_store;

		mm->map_count++;
		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto loop_out;
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
loop_out:
	vma_iter_free(&vmi);
	if (!retval)
		mt_set_in_rcu(vmi.mas.tree);
out:
	mmap_write_unlock(mm);
	flush_tlb_mm(oldmm);
	mmap_write_unlock(oldmm);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;

fail_nomem_vmi_store:
	unlink_anon_vmas(tmp);
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto loop_out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mmap_write_lock(oldmm);
	dup_mm_exe_file(mm, oldmm);
	mmap_write_unlock(oldmm);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

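/*
 * Sanity-check an mm_struct that is about to be freed: the RSS counters
 * and the page-table byte count should both have dropped back to zero.
 */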
static void check_mm(struct mm_struct *mm)
{
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
			 "Please make sure 'struct resident_page_types[]' is updated as well");

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = percpu_counter_sum(&mm->rss_stat[i]);

		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static void do_check_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;

	WARN_ON_ONCE(current->active_mm == mm);
}

static void do_shoot_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		WARN_ON_ONCE(current->mm);
		current->active_mm = &init_mm;
		switch_mm(mm, &init_mm, current);
	}
}

static void cleanup_lazy_tlbs(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) {
		/*
		 * In this case, lazy tlb mms are refcounted and would not reach
		 * __mmdrop until all CPUs have switched away and mmdrop()ed.
		 */
		return;
	}

	/*
	 * Lazy mm shootdown does not refcount "lazy tlb mm" usage, rather it
	 * requires lazy mm users to switch to another mm when the refcount
	 * drops to zero, before the mm is freed. This requires IPIs here to
	 * switch kernel threads to init_mm.
	 *
	 * archs that use IPIs to flush TLBs can piggy-back that lazy tlb mm
	 * switch with the final userspace teardown TLB flush which leaves the
	 * mm lazy on this CPU but no others, reducing the need for additional
	 * IPIs here. There are cases where a final IPI is still required here,
	 * such as the final mmdrop being performed on a different CPU than the
	 * one exiting, or kernel threads using the mm when userspace exits.
	 *
	 * IPI overheads have not been found to be expensive, but they could be
	 * reduced in a number of possible ways, for example (roughly
	 * increasing order of complexity):
	 * - The last lazy reference created by exit_mm() could instead switch
	 *   to init_mm, however it's probable this will run on the same CPU
	 *   immediately afterwards, so this may not reduce IPIs much.
	 * - A batch of mms requiring IPIs could be gathered and freed at once.
	 * - CPUs store active_mm where it can be remotely checked without a
	 *   lock, to filter out false-positives in the cpumask.
	 * - After mm_users or mm_count reaches zero, switching away from the
	 *   mm could clear mm_cpumask to reduce some IPIs, perhaps together
	 *   with some batching or delaying of the final IPIs.
	 * - A delayed freeing and RCU-like quiescing sequence based on mm
	 *   switching to avoid IPIs completely.
	 */
	on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1);
	if (IS_ENABLED(CONFIG_DEBUG_VM_SHOOT_LAZIES))
		on_each_cpu(do_check_lazy_tlb, (void *)mm, 1);
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);

	/* Ensure no CPUs are using this as their lazy tlb mm */
	cleanup_lazy_tlbs(mm);

	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_subscriptions_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	mm_pasid_drop(mm);
	mm_destroy_cid(mm);
	percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);

	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (refcount_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(refcount_read(&tsk->usage));
	WARN_ON(tsk == current);

	io_uring_free(tsk);
	cgroup_free(tsk);
	task_numa_free(tsk, true);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);
	sched_core_free(tsk);
	free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __put_task_struct_rcu_cb(struct rcu_head *rhp)
{
	struct task_struct *task = container_of(rhp, struct task_struct, rcu);

	__put_task_struct(task);
}
EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;
	unsigned long nr_pages = totalram_pages();

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
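/*
 * Worked example with assumed numbers: with 4 GiB of RAM and 16 KiB kernel
 * stacks, the limit computed above is 4 GiB / (16 KiB * 8) = 32768 threads,
 * i.e. thread stacks may consume at most one eighth of memory.
 */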
10191da177e4SLinus Torvalds
10201da177e4SLinus Torvalds #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
10211da177e4SLinus Torvalds /* Initialized by the architecture: */
10221da177e4SLinus Torvalds int arch_task_struct_size __read_mostly;
10231da177e4SLinus Torvalds #endif
10241da177e4SLinus Torvalds
10254189ff23SChristoph Hellwig #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
task_struct_whitelist(unsigned long * offset,unsigned long * size)10265905429aSKees Cook static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
10275905429aSKees Cook {
10285905429aSKees Cook /* Fetch thread_struct whitelist for the architecture. */
10295905429aSKees Cook arch_thread_struct_whitelist(offset, size);
10305905429aSKees Cook
10315905429aSKees Cook /*
10325905429aSKees Cook * Handle zero-sized whitelist or empty thread_struct, otherwise
10335905429aSKees Cook * adjust offset to position of thread_struct in task_struct.
10345905429aSKees Cook */
10355905429aSKees Cook if (unlikely(*size == 0))
10365905429aSKees Cook *offset = 0;
10375905429aSKees Cook else
10385905429aSKees Cook *offset += offsetof(struct task_struct, thread);
10395905429aSKees Cook }
10404189ff23SChristoph Hellwig #endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */
10415905429aSKees Cook
10421da177e4SLinus Torvalds void __init fork_init(void)
10431da177e4SLinus Torvalds {
10441da177e4SLinus Torvalds int i;
10451da177e4SLinus Torvalds #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
10461da177e4SLinus Torvalds #ifndef ARCH_MIN_TASKALIGN
10471da177e4SLinus Torvalds #define ARCH_MIN_TASKALIGN 0
10481da177e4SLinus Torvalds #endif
10491da177e4SLinus Torvalds int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
10505905429aSKees Cook unsigned long useroffset, usersize;
10511da177e4SLinus Torvalds
10521da177e4SLinus Torvalds /* create a slab on which task_structs can be allocated */
10535905429aSKees Cook task_struct_whitelist(&useroffset, &usersize);
10545905429aSKees Cook task_struct_cachep = kmem_cache_create_usercopy("task_struct",
10551da177e4SLinus Torvalds arch_task_struct_size, align,
10565905429aSKees Cook SLAB_PANIC|SLAB_ACCOUNT,
10575905429aSKees Cook useroffset, usersize, NULL);
10581da177e4SLinus Torvalds #endif
10591da177e4SLinus Torvalds
10601da177e4SLinus Torvalds /* do the arch specific task caches init */
10611da177e4SLinus Torvalds arch_task_cache_init();
10621da177e4SLinus Torvalds
10631da177e4SLinus Torvalds set_max_threads(MAX_THREADS);
10641da177e4SLinus Torvalds
10651da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
10661da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
10671da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_SIGPENDING] =
10681da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_NPROC];
10691da177e4SLinus Torvalds
1070de399236SAlexey Gladkov for (i = 0; i < UCOUNT_COUNTS; i++)
10711da177e4SLinus Torvalds init_user_ns.ucount_max[i] = max_threads/2;
10721da177e4SLinus Torvalds
1073de399236SAlexey Gladkov set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY);
1074de399236SAlexey Gladkov set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY);
1075de399236SAlexey Gladkov set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
1076de399236SAlexey Gladkov set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY);
107721d1c5e3SAlexey Gladkov
10781da177e4SLinus Torvalds #ifdef CONFIG_VMAP_STACK
10791da177e4SLinus Torvalds cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
10801da177e4SLinus Torvalds NULL, free_vm_stack_cache);
10811da177e4SLinus Torvalds #endif
10821da177e4SLinus Torvalds
1083d08b9f0cSSami Tolvanen scs_init();
1084d08b9f0cSSami Tolvanen
10851da177e4SLinus Torvalds lockdep_init_task(&init_task);
1086aad42dd4SNadav Amit uprobes_init();
10871da177e4SLinus Torvalds }
10881da177e4SLinus Torvalds
10891da177e4SLinus Torvalds int __weak arch_dup_task_struct(struct task_struct *dst,
10901da177e4SLinus Torvalds struct task_struct *src)
10911da177e4SLinus Torvalds {
10921da177e4SLinus Torvalds *dst = *src;
10931da177e4SLinus Torvalds return 0;
10941da177e4SLinus Torvalds }
10951da177e4SLinus Torvalds
10961da177e4SLinus Torvalds void set_task_stack_end_magic(struct task_struct *tsk)
10971da177e4SLinus Torvalds {
10981da177e4SLinus Torvalds unsigned long *stackend;
10991da177e4SLinus Torvalds
11001da177e4SLinus Torvalds stackend = end_of_stack(tsk);
11011da177e4SLinus Torvalds *stackend = STACK_END_MAGIC; /* for overflow detection */
11021da177e4SLinus Torvalds }
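/*
 * Note: the sentinel written above is what task_stack_end_corrupted()
 * tests later (e.g. from the scheduler) to detect a kernel stack that has
 * overflowed past its end.
 */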
11031da177e4SLinus Torvalds
11041da177e4SLinus Torvalds static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
11051da177e4SLinus Torvalds {
11061da177e4SLinus Torvalds struct task_struct *tsk;
11071da177e4SLinus Torvalds int err;
11081da177e4SLinus Torvalds
11091da177e4SLinus Torvalds if (node == NUMA_NO_NODE)
11101da177e4SLinus Torvalds node = tsk_fork_get_node(orig);
11111da177e4SLinus Torvalds tsk = alloc_task_struct_node(node);
11121da177e4SLinus Torvalds if (!tsk)
11131da177e4SLinus Torvalds return NULL;
11141da177e4SLinus Torvalds
1115546c42b2SSebastian Andrzej Siewior err = arch_dup_task_struct(tsk, orig);
1116546c42b2SSebastian Andrzej Siewior if (err)
11171da177e4SLinus Torvalds goto free_tsk;
11181da177e4SLinus Torvalds
11197865aba3SSebastian Andrzej Siewior err = alloc_thread_stack_node(tsk, node);
11207865aba3SSebastian Andrzej Siewior if (err)
11211da177e4SLinus Torvalds goto free_tsk;
11229b6f7e16SRoman Gushchin
11231da177e4SLinus Torvalds #ifdef CONFIG_THREAD_INFO_IN_TASK
1124f0b89d39SElena Reshetova refcount_set(&tsk->stack_refcount, 1);
11251da177e4SLinus Torvalds #endif
11261a03d3f1SSebastian Andrzej Siewior account_kernel_stack(tsk, 1);
11271da177e4SLinus Torvalds
1128d08b9f0cSSami Tolvanen err = scs_prepare(tsk, node);
1129d08b9f0cSSami Tolvanen if (err)
1130d08b9f0cSSami Tolvanen goto free_stack;
1131d08b9f0cSSami Tolvanen
11321da177e4SLinus Torvalds #ifdef CONFIG_SECCOMP
11331da177e4SLinus Torvalds /*
11341da177e4SLinus Torvalds * We must handle setting up seccomp filters once we're under
11351da177e4SLinus Torvalds * the sighand lock in case orig has changed between now and
11361da177e4SLinus Torvalds * then. Until then, filter must be NULL to avoid messing up
11371da177e4SLinus Torvalds * the usage counts on the error path calling free_task.
11381da177e4SLinus Torvalds */
11391da177e4SLinus Torvalds tsk->seccomp.filter = NULL;
11401da177e4SLinus Torvalds #endif
11411da177e4SLinus Torvalds
11421da177e4SLinus Torvalds setup_thread_stack(tsk, orig);
11431da177e4SLinus Torvalds clear_user_return_notifier(tsk);
11441da177e4SLinus Torvalds clear_tsk_need_resched(tsk);
11451da177e4SLinus Torvalds set_task_stack_end_magic(tsk);
11461446e1dfSGabriel Krisman Bertazi clear_syscall_work_syscall_user_dispatch(tsk);
11471da177e4SLinus Torvalds
1148050e9baaSLinus Torvalds #ifdef CONFIG_STACKPROTECTOR
11491da177e4SLinus Torvalds tsk->stack_canary = get_random_canary();
11501da177e4SLinus Torvalds #endif
11513bd37062SSebastian Andrzej Siewior if (orig->cpus_ptr == &orig->cpus_mask)
11523bd37062SSebastian Andrzej Siewior tsk->cpus_ptr = &tsk->cpus_mask;
1153b90ca8baSWill Deacon dup_user_cpus_ptr(tsk, orig, node);
11541da177e4SLinus Torvalds
11551da177e4SLinus Torvalds /*
11560ff7b2cfSEric W. Biederman * One for the user space visible state that goes away when reaped.
11570ff7b2cfSEric W. Biederman * One for the scheduler.
11581da177e4SLinus Torvalds */
11590ff7b2cfSEric W. Biederman refcount_set(&tsk->rcu_users, 2);
11600ff7b2cfSEric W. Biederman /* One for the rcu users */
11610ff7b2cfSEric W. Biederman refcount_set(&tsk->usage, 1);
11621da177e4SLinus Torvalds #ifdef CONFIG_BLK_DEV_IO_TRACE
11631da177e4SLinus Torvalds tsk->btrace_seq = 0;
11641da177e4SLinus Torvalds #endif
11651da177e4SLinus Torvalds tsk->splice_pipe = NULL;
11661da177e4SLinus Torvalds tsk->task_frag.page = NULL;
11671da177e4SLinus Torvalds tsk->wake_q.next = NULL;
1168e32cf5dfSEric W. Biederman tsk->worker_private = NULL;
11691da177e4SLinus Torvalds
11701da177e4SLinus Torvalds kcov_task_init(tsk);
117150b5e49cSAlexander Potapenko kmsan_task_create(tsk);
11725fbda3ecSThomas Gleixner kmap_local_fork(tsk);
11731da177e4SLinus Torvalds
11741da177e4SLinus Torvalds #ifdef CONFIG_FAULT_INJECTION
11751da177e4SLinus Torvalds tsk->fail_nth = 0;
11761da177e4SLinus Torvalds #endif
11771da177e4SLinus Torvalds
11782c323017SJosef Bacik #ifdef CONFIG_BLK_CGROUP
1179f05837edSChristoph Hellwig tsk->throttle_disk = NULL;
11802c323017SJosef Bacik tsk->use_memdelay = 0;
11812c323017SJosef Bacik #endif
11822c323017SJosef Bacik
1183a3d29e82SPeter Zijlstra #ifdef CONFIG_IOMMU_SVA
1184a3d29e82SPeter Zijlstra tsk->pasid_activated = 0;
1185a3d29e82SPeter Zijlstra #endif
1186a3d29e82SPeter Zijlstra
1187d46eb14bSShakeel Butt #ifdef CONFIG_MEMCG
1188d46eb14bSShakeel Butt tsk->active_memcg = NULL;
1189d46eb14bSShakeel Butt #endif
1190b041b525STony Luck
1191b041b525STony Luck #ifdef CONFIG_CPU_SUP_INTEL
1192b041b525STony Luck tsk->reported_split_lock = 0;
1193b041b525STony Luck #endif
1194b041b525STony Luck
1195af7f588dSMathieu Desnoyers #ifdef CONFIG_SCHED_MM_CID
1196af7f588dSMathieu Desnoyers tsk->mm_cid = -1;
1197223baf9dSMathieu Desnoyers tsk->last_mm_cid = -1;
1198af7f588dSMathieu Desnoyers tsk->mm_cid_active = 0;
1199223baf9dSMathieu Desnoyers tsk->migrate_from_cpu = -1;
1200af7f588dSMathieu Desnoyers #endif
12011da177e4SLinus Torvalds return tsk;
12021da177e4SLinus Torvalds
12031da177e4SLinus Torvalds free_stack:
12041a03d3f1SSebastian Andrzej Siewior exit_task_stack_account(tsk);
12051da177e4SLinus Torvalds free_thread_stack(tsk);
12061da177e4SLinus Torvalds free_tsk:
12071da177e4SLinus Torvalds free_task_struct(tsk);
12081da177e4SLinus Torvalds return NULL;
12091da177e4SLinus Torvalds }
12101da177e4SLinus Torvalds
12111da177e4SLinus Torvalds __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
12121da177e4SLinus Torvalds
12134cb0e11bSHidehiro Kawai static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
12144cb0e11bSHidehiro Kawai
12154cb0e11bSHidehiro Kawai static int __init coredump_filter_setup(char *s)
12164cb0e11bSHidehiro Kawai {
12174cb0e11bSHidehiro Kawai default_dump_filter =
12184cb0e11bSHidehiro Kawai (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
12194cb0e11bSHidehiro Kawai MMF_DUMP_FILTER_MASK;
12204cb0e11bSHidehiro Kawai return 1;
12214cb0e11bSHidehiro Kawai }
12224cb0e11bSHidehiro Kawai
12234cb0e11bSHidehiro Kawai __setup("coredump_filter=", coredump_filter_setup);
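/*
 * Example (illustrative): booting with "coredump_filter=0x23" lets
 * simple_strtoul() auto-detect the base (hex here), shifts the value into
 * the MMF_DUMP_FILTER bit range and masks off anything outside it.
 * default_dump_filter is then used by mm_init() below whenever a new mm is
 * created without a parent mm to inherit flags from.
 */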
12244cb0e11bSHidehiro Kawai
12251da177e4SLinus Torvalds #include <linux/init_task.h>
12261da177e4SLinus Torvalds
1227858f0993SAlexey Dobriyan static void mm_init_aio(struct mm_struct *mm)
1228858f0993SAlexey Dobriyan {
1229858f0993SAlexey Dobriyan #ifdef CONFIG_AIO
1230858f0993SAlexey Dobriyan spin_lock_init(&mm->ioctx_lock);
1231db446a08SBenjamin LaHaise mm->ioctx_table = NULL;
1232858f0993SAlexey Dobriyan #endif
1233858f0993SAlexey Dobriyan }
1234858f0993SAlexey Dobriyan
1235c3f3ce04SAndrea Arcangeli static __always_inline void mm_clear_owner(struct mm_struct *mm,
1236c3f3ce04SAndrea Arcangeli struct task_struct *p)
1237c3f3ce04SAndrea Arcangeli {
1238c3f3ce04SAndrea Arcangeli #ifdef CONFIG_MEMCG
1239c3f3ce04SAndrea Arcangeli if (mm->owner == p)
1240c3f3ce04SAndrea Arcangeli WRITE_ONCE(mm->owner, NULL);
1241c3f3ce04SAndrea Arcangeli #endif
1242c3f3ce04SAndrea Arcangeli }
1243c3f3ce04SAndrea Arcangeli
124433144e84SVladimir Davydov static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
124533144e84SVladimir Davydov {
124633144e84SVladimir Davydov #ifdef CONFIG_MEMCG
124733144e84SVladimir Davydov mm->owner = p;
124833144e84SVladimir Davydov #endif
124933144e84SVladimir Davydov }
125033144e84SVladimir Davydov
1251355627f5SEric Biggers static void mm_init_uprobes_state(struct mm_struct *mm)
1252355627f5SEric Biggers {
1253355627f5SEric Biggers #ifdef CONFIG_UPROBES
1254355627f5SEric Biggers mm->uprobes_state.xol_area = NULL;
1255355627f5SEric Biggers #endif
1256355627f5SEric Biggers }
1257355627f5SEric Biggers
1258bfedb589SEric W. Biederman static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
1259bfedb589SEric W. Biederman struct user_namespace *user_ns)
12601da177e4SLinus Torvalds {
1261d4af56c5SLiam R. Howlett mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
1262d4af56c5SLiam R. Howlett mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
12631da177e4SLinus Torvalds atomic_set(&mm->mm_users, 1);
12641da177e4SLinus Torvalds atomic_set(&mm->mm_count, 1);
126557efa1feSJason Gunthorpe seqcount_init(&mm->write_protect_seq);
1266d8ed45c5SMichel Lespinasse mmap_init_lock(mm);
12671da177e4SLinus Torvalds INIT_LIST_HEAD(&mm->mmlist);
12685e31275cSSuren Baghdasaryan #ifdef CONFIG_PER_VMA_LOCK
12695e31275cSSuren Baghdasaryan mm->mm_lock_seq = 0;
12705e31275cSSuren Baghdasaryan #endif
1271af5b0f6aSKirill A. Shutemov mm_pgtables_bytes_init(mm);
127241f727fdSVladimir Davydov mm->map_count = 0;
127341f727fdSVladimir Davydov mm->locked_vm = 0;
127470f8a3caSDavidlohr Bueso atomic64_set(&mm->pinned_vm, 0);
1275d559db08SKAMEZAWA Hiroyuki memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
12761da177e4SLinus Torvalds spin_lock_init(&mm->page_table_lock);
127788aa7cc6SYang Shi spin_lock_init(&mm->arg_lock);
127841f727fdSVladimir Davydov mm_init_cpumask(mm);
1279858f0993SAlexey Dobriyan mm_init_aio(mm);
1280cf475ad2SBalbir Singh mm_init_owner(mm, p);
1281a6cbd440SFenghua Yu mm_pasid_init(mm);
12822b7e8665SEric Biggers RCU_INIT_POINTER(mm->exe_file, NULL);
1283984cfe4eSJason Gunthorpe mmu_notifier_subscriptions_init(mm);
128416af97dcSNadav Amit init_tlb_flush_pending(mm);
128541f727fdSVladimir Davydov #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
128641f727fdSVladimir Davydov mm->pmd_huge_pte = NULL;
128741f727fdSVladimir Davydov #endif
1288355627f5SEric Biggers mm_init_uprobes_state(mm);
128913db8c50SLiu Zixian hugetlb_count_init(mm);
12901da177e4SLinus Torvalds
1291a0715cc2SAlex Thorlton if (current->mm) {
12922b00d1fdSFlorent Revest mm->flags = mmf_init_flags(current->mm->flags);
1293a0715cc2SAlex Thorlton mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
1294a0715cc2SAlex Thorlton } else {
1295a0715cc2SAlex Thorlton mm->flags = default_dump_filter;
12961da177e4SLinus Torvalds mm->def_flags = 0;
1297a0715cc2SAlex Thorlton }
1298a0715cc2SAlex Thorlton
129941f727fdSVladimir Davydov if (mm_alloc_pgd(mm))
130041f727fdSVladimir Davydov goto fail_nopgd;
130178fb7466SPavel Emelianov
130241f727fdSVladimir Davydov if (init_new_context(p, mm))
130341f727fdSVladimir Davydov goto fail_nocontext;
130441f727fdSVladimir Davydov
1305223baf9dSMathieu Desnoyers if (mm_alloc_cid(mm))
1306223baf9dSMathieu Desnoyers goto fail_cid;
1307223baf9dSMathieu Desnoyers
130814ef95beSMateusz Guzik if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT,
130914ef95beSMateusz Guzik NR_MM_COUNTERS))
1310f1a79412SShakeel Butt goto fail_pcpu;
1311f1a79412SShakeel Butt
1312bfedb589SEric W. Biederman mm->user_ns = get_user_ns(user_ns);
1313bd74fdaeSYu Zhao lru_gen_init_mm(mm);
131441f727fdSVladimir Davydov return mm;
131541f727fdSVladimir Davydov
1316f1a79412SShakeel Butt fail_pcpu:
1317223baf9dSMathieu Desnoyers mm_destroy_cid(mm);
1318223baf9dSMathieu Desnoyers fail_cid:
1319b20b0368SMathieu Desnoyers destroy_context(mm);
132041f727fdSVladimir Davydov fail_nocontext:
132141f727fdSVladimir Davydov mm_free_pgd(mm);
132241f727fdSVladimir Davydov fail_nopgd:
13231da177e4SLinus Torvalds free_mm(mm);
13241da177e4SLinus Torvalds return NULL;
13251da177e4SLinus Torvalds }
13261da177e4SLinus Torvalds
13271da177e4SLinus Torvalds /*
13281da177e4SLinus Torvalds * Allocate and initialize an mm_struct.
13291da177e4SLinus Torvalds */
13301da177e4SLinus Torvalds struct mm_struct *mm_alloc(void)
13311da177e4SLinus Torvalds {
13321da177e4SLinus Torvalds struct mm_struct *mm;
13331da177e4SLinus Torvalds
13341da177e4SLinus Torvalds mm = allocate_mm();
1335de03c72cSKOSAKI Motohiro if (!mm)
1336de03c72cSKOSAKI Motohiro return NULL;
1337de03c72cSKOSAKI Motohiro
13381da177e4SLinus Torvalds memset(mm, 0, sizeof(*mm));
1339bfedb589SEric W. Biederman return mm_init(mm, current, current_user_ns());
13401da177e4SLinus Torvalds }
13411da177e4SLinus Torvalds
1342ec8d7c14SMichal Hocko static inline void __mmput(struct mm_struct *mm)
13431da177e4SLinus Torvalds {
1344ec8d7c14SMichal Hocko VM_BUG_ON(atomic_read(&mm->mm_users));
13450ae26f1bSAndrew Morton
1346d4b3b638SSrikar Dronamraju uprobe_clear_state(mm);
13471da177e4SLinus Torvalds exit_aio(mm);
13481c2fb7a4SAndrea Arcangeli ksm_exit(mm);
1349ba76149fSAndrea Arcangeli khugepaged_exit(mm); /* must run before exit_mmap */
13501da177e4SLinus Torvalds exit_mmap(mm);
13516fcb52a5SAaron Lu mm_put_huge_zero_page(mm);
1352925d1c40SMatt Helsley set_mm_exe_file(mm, NULL);
13531da177e4SLinus Torvalds if (!list_empty(&mm->mmlist)) {
13541da177e4SLinus Torvalds spin_lock(&mmlist_lock);
13551da177e4SLinus Torvalds list_del(&mm->mmlist);
13561da177e4SLinus Torvalds spin_unlock(&mmlist_lock);
13571da177e4SLinus Torvalds }
1358801460d0SHiroshi Shimamoto if (mm->binfmt)
1359801460d0SHiroshi Shimamoto module_put(mm->binfmt->module);
1360bd74fdaeSYu Zhao lru_gen_del_mm(mm);
13611da177e4SLinus Torvalds mmdrop(mm);
13621da177e4SLinus Torvalds }
1363ec8d7c14SMichal Hocko
1364ec8d7c14SMichal Hocko /*
1365ec8d7c14SMichal Hocko * Decrement the use count and release all resources for an mm.
1366ec8d7c14SMichal Hocko */
1367ec8d7c14SMichal Hocko void mmput(struct mm_struct *mm)
1368ec8d7c14SMichal Hocko {
1369ec8d7c14SMichal Hocko might_sleep();
1370ec8d7c14SMichal Hocko
1371ec8d7c14SMichal Hocko if (atomic_dec_and_test(&mm->mm_users))
1372ec8d7c14SMichal Hocko __mmput(mm);
13731da177e4SLinus Torvalds }
13741da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(mmput);
13751da177e4SLinus Torvalds
1376a1b2289cSSherry Yang #ifdef CONFIG_MMU
1377a1b2289cSSherry Yang static void mmput_async_fn(struct work_struct *work)
1378a1b2289cSSherry Yang {
1379a1b2289cSSherry Yang struct mm_struct *mm = container_of(work, struct mm_struct,
1380a1b2289cSSherry Yang async_put_work);
1381a1b2289cSSherry Yang
1382a1b2289cSSherry Yang __mmput(mm);
1383a1b2289cSSherry Yang }
1384a1b2289cSSherry Yang
1385a1b2289cSSherry Yang void mmput_async(struct mm_struct *mm)
1386a1b2289cSSherry Yang {
1387a1b2289cSSherry Yang if (atomic_dec_and_test(&mm->mm_users)) {
1388a1b2289cSSherry Yang INIT_WORK(&mm->async_put_work, mmput_async_fn);
1389a1b2289cSSherry Yang schedule_work(&mm->async_put_work);
1390a1b2289cSSherry Yang }
1391a1b2289cSSherry Yang }
139285eaeb50SYishai Hadas EXPORT_SYMBOL_GPL(mmput_async);
1393a1b2289cSSherry Yang #endif
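/*
 * Note: mmput() may sleep (see the might_sleep() above), so contexts that
 * cannot sleep drop their reference with mmput_async() instead and let the
 * final __mmput() run from a workqueue.
 */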
1394a1b2289cSSherry Yang
139590f31d0eSKonstantin Khlebnikov /**
139690f31d0eSKonstantin Khlebnikov * set_mm_exe_file - change a reference to the mm's executable file
139790f31d0eSKonstantin Khlebnikov *
139890f31d0eSKonstantin Khlebnikov * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
139990f31d0eSKonstantin Khlebnikov *
14006e399cd1SDavidlohr Bueso * Main users are mmput() and sys_execve(). Callers prevent concurrent
1401a7031f14SMateusz Guzik * invocations: in mmput() there is nobody alive left; in execve it happens before
1402a7031f14SMateusz Guzik * the new mm is made visible to anyone.
1403fe69d560SDavid Hildenbrand *
1404fe69d560SDavid Hildenbrand * Can only fail if new_exe_file != NULL.
140590f31d0eSKonstantin Khlebnikov */
1406fe69d560SDavid Hildenbrand int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
140738646013SJiri Slaby {
14086e399cd1SDavidlohr Bueso struct file *old_exe_file;
14096e399cd1SDavidlohr Bueso
14106e399cd1SDavidlohr Bueso /*
14116e399cd1SDavidlohr Bueso * It is safe to dereference the exe_file without RCU as
14126e399cd1SDavidlohr Bueso * this function is only called if nobody else can access
14136e399cd1SDavidlohr Bueso * this mm -- see comment above for justification.
14146e399cd1SDavidlohr Bueso */
14156e399cd1SDavidlohr Bueso old_exe_file = rcu_dereference_raw(mm->exe_file);
141690f31d0eSKonstantin Khlebnikov
1417fe69d560SDavid Hildenbrand if (new_exe_file) {
1418fe69d560SDavid Hildenbrand /*
1419fe69d560SDavid Hildenbrand * We expect the caller (i.e., sys_execve) to have already denied
1420fe69d560SDavid Hildenbrand * write access, so this is unlikely to fail.
1421fe69d560SDavid Hildenbrand */
1422fe69d560SDavid Hildenbrand if (unlikely(deny_write_access(new_exe_file)))
1423fe69d560SDavid Hildenbrand return -EACCES;
142438646013SJiri Slaby get_file(new_exe_file);
1425fe69d560SDavid Hildenbrand }
142690f31d0eSKonstantin Khlebnikov rcu_assign_pointer(mm->exe_file, new_exe_file);
1427fe69d560SDavid Hildenbrand if (old_exe_file) {
1428fe69d560SDavid Hildenbrand allow_write_access(old_exe_file);
142990f31d0eSKonstantin Khlebnikov fput(old_exe_file);
143038646013SJiri Slaby }
1431fe69d560SDavid Hildenbrand return 0;
1432fe69d560SDavid Hildenbrand }
143338646013SJiri Slaby
143490f31d0eSKonstantin Khlebnikov /**
143535d7bdc8SDavid Hildenbrand * replace_mm_exe_file - replace a reference to the mm's executable file
143635d7bdc8SDavid Hildenbrand *
1437a7031f14SMateusz Guzik * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
143835d7bdc8SDavid Hildenbrand *
143935d7bdc8SDavid Hildenbrand * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE).
144035d7bdc8SDavid Hildenbrand */
144135d7bdc8SDavid Hildenbrand int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
144235d7bdc8SDavid Hildenbrand {
144335d7bdc8SDavid Hildenbrand struct vm_area_struct *vma;
144435d7bdc8SDavid Hildenbrand struct file *old_exe_file;
144535d7bdc8SDavid Hildenbrand int ret = 0;
144635d7bdc8SDavid Hildenbrand
144735d7bdc8SDavid Hildenbrand /* Forbid mm->exe_file change if old file still mapped. */
144835d7bdc8SDavid Hildenbrand old_exe_file = get_mm_exe_file(mm);
144935d7bdc8SDavid Hildenbrand if (old_exe_file) {
1450fa5e5876SMatthew Wilcox (Oracle) VMA_ITERATOR(vmi, mm, 0);
145135d7bdc8SDavid Hildenbrand mmap_read_lock(mm);
1452fa5e5876SMatthew Wilcox (Oracle) for_each_vma(vmi, vma) {
145335d7bdc8SDavid Hildenbrand if (!vma->vm_file)
145435d7bdc8SDavid Hildenbrand continue;
145535d7bdc8SDavid Hildenbrand if (path_equal(&vma->vm_file->f_path,
1456fa5e5876SMatthew Wilcox (Oracle) &old_exe_file->f_path)) {
145735d7bdc8SDavid Hildenbrand ret = -EBUSY;
1458fa5e5876SMatthew Wilcox (Oracle) break;
1459fa5e5876SMatthew Wilcox (Oracle) }
146035d7bdc8SDavid Hildenbrand }
146135d7bdc8SDavid Hildenbrand mmap_read_unlock(mm);
146235d7bdc8SDavid Hildenbrand fput(old_exe_file);
146335d7bdc8SDavid Hildenbrand if (ret)
146435d7bdc8SDavid Hildenbrand return ret;
146535d7bdc8SDavid Hildenbrand }
146635d7bdc8SDavid Hildenbrand
1467fe69d560SDavid Hildenbrand ret = deny_write_access(new_exe_file);
1468fe69d560SDavid Hildenbrand if (ret)
1469fe69d560SDavid Hildenbrand return -EACCES;
147035d7bdc8SDavid Hildenbrand get_file(new_exe_file);
1471fe69d560SDavid Hildenbrand
1472a7031f14SMateusz Guzik /* set the new file */
1473a7031f14SMateusz Guzik mmap_write_lock(mm);
1474a7031f14SMateusz Guzik old_exe_file = rcu_dereference_raw(mm->exe_file);
1475a7031f14SMateusz Guzik rcu_assign_pointer(mm->exe_file, new_exe_file);
1476a7031f14SMateusz Guzik mmap_write_unlock(mm);
1477a7031f14SMateusz Guzik
1478fe69d560SDavid Hildenbrand if (old_exe_file) {
1479fe69d560SDavid Hildenbrand allow_write_access(old_exe_file);
148035d7bdc8SDavid Hildenbrand fput(old_exe_file);
1481fe69d560SDavid Hildenbrand }
148235d7bdc8SDavid Hildenbrand return 0;
148335d7bdc8SDavid Hildenbrand }
148438646013SJiri Slaby
148590f31d0eSKonstantin Khlebnikov /**
148690f31d0eSKonstantin Khlebnikov * get_mm_exe_file - acquire a reference to the mm's executable file
148790f31d0eSKonstantin Khlebnikov *
148890f31d0eSKonstantin Khlebnikov * Returns %NULL if mm has no associated executable file.
148990f31d0eSKonstantin Khlebnikov * User must release file via fput().
149090f31d0eSKonstantin Khlebnikov */
149138646013SJiri Slaby struct file *get_mm_exe_file(struct mm_struct *mm)
149238646013SJiri Slaby {
149338646013SJiri Slaby struct file *exe_file;
149438646013SJiri Slaby
149590f31d0eSKonstantin Khlebnikov rcu_read_lock();
149690f31d0eSKonstantin Khlebnikov exe_file = rcu_dereference(mm->exe_file);
149790f31d0eSKonstantin Khlebnikov if (exe_file && !get_file_rcu(exe_file))
149890f31d0eSKonstantin Khlebnikov exe_file = NULL;
149990f31d0eSKonstantin Khlebnikov rcu_read_unlock();
150038646013SJiri Slaby return exe_file;
150138646013SJiri Slaby }
150238646013SJiri Slaby
15031da177e4SLinus Torvalds /**
1504cd81a917SMateusz Guzik * get_task_exe_file - acquire a reference to the task's executable file
1505cd81a917SMateusz Guzik *
1506cd81a917SMateusz Guzik * Returns %NULL if task's mm (if any) has no associated executable file or
1507cd81a917SMateusz Guzik * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
1508cd81a917SMateusz Guzik * User must release file via fput().
1509cd81a917SMateusz Guzik */
1510cd81a917SMateusz Guzik struct file *get_task_exe_file(struct task_struct *task)
1511cd81a917SMateusz Guzik {
1512cd81a917SMateusz Guzik struct file *exe_file = NULL;
1513cd81a917SMateusz Guzik struct mm_struct *mm;
1514cd81a917SMateusz Guzik
1515cd81a917SMateusz Guzik task_lock(task);
1516cd81a917SMateusz Guzik mm = task->mm;
1517cd81a917SMateusz Guzik if (mm) {
1518cd81a917SMateusz Guzik if (!(task->flags & PF_KTHREAD))
1519cd81a917SMateusz Guzik exe_file = get_mm_exe_file(mm);
1520cd81a917SMateusz Guzik }
1521cd81a917SMateusz Guzik task_unlock(task);
1522cd81a917SMateusz Guzik return exe_file;
1523cd81a917SMateusz Guzik }
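/*
 * Illustrative sketch of a consumer, following the "release via fput()"
 * rule from the comment above. The helper name example_log_exe() and the
 * pr_info() message are hypothetical, not part of fork.c.
 */
static __maybe_unused void example_log_exe(struct task_struct *task)
{
	struct file *exe_file = get_task_exe_file(task);

	if (!exe_file)
		return;		/* no mm, or kthread with borrowed mm */

	pr_info("%s: exe inode %lu\n", task->comm, file_inode(exe_file)->i_ino);
	fput(exe_file);		/* balances get_task_exe_file() */
}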
1524cd81a917SMateusz Guzik
1525cd81a917SMateusz Guzik /**
15261da177e4SLinus Torvalds * get_task_mm - acquire a reference to the task's mm
15271da177e4SLinus Torvalds *
1528246bb0b1SOleg Nesterov * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
15291da177e4SLinus Torvalds * this kernel thread has only transiently adopted a user mm with use_mm,
15301da177e4SLinus Torvalds * e.g. to do its AIO). Otherwise returns a reference to the mm, after
15311da177e4SLinus Torvalds * bumping up the use count. The user must release the mm via mmput()
15321da177e4SLinus Torvalds * after use. Typically used by /proc and ptrace.
15331da177e4SLinus Torvalds */
15341da177e4SLinus Torvalds struct mm_struct *get_task_mm(struct task_struct *task)
15351da177e4SLinus Torvalds {
15361da177e4SLinus Torvalds struct mm_struct *mm;
15371da177e4SLinus Torvalds
15381da177e4SLinus Torvalds task_lock(task);
15391da177e4SLinus Torvalds mm = task->mm;
15401da177e4SLinus Torvalds if (mm) {
1541246bb0b1SOleg Nesterov if (task->flags & PF_KTHREAD)
15421da177e4SLinus Torvalds mm = NULL;
15431da177e4SLinus Torvalds else
15443fce371bSVegard Nossum mmget(mm);
15451da177e4SLinus Torvalds }
15461da177e4SLinus Torvalds task_unlock(task);
15471da177e4SLinus Torvalds return mm;
15481da177e4SLinus Torvalds }
15491da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(get_task_mm);
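/*
 * Illustrative sketch of the typical /proc- or ptrace-style caller: take a
 * reference with get_task_mm(), use the mm under mmap_read_lock(), then
 * drop both. example_dump_map_count() is a hypothetical helper, not part
 * of fork.c.
 */
static __maybe_unused void example_dump_map_count(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		return;		/* kernel thread or task with no mm */

	mmap_read_lock(mm);
	pr_info("%s: map_count=%d\n", task->comm, mm->map_count);
	mmap_read_unlock(mm);

	mmput(mm);		/* balances get_task_mm() */
}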
15501da177e4SLinus Torvalds
15518cdb878dSChristopher Yeoh struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
15528cdb878dSChristopher Yeoh {
15538cdb878dSChristopher Yeoh struct mm_struct *mm;
15548cdb878dSChristopher Yeoh int err;
15558cdb878dSChristopher Yeoh
1556f7cfd871SEric W. Biederman err = down_read_killable(&task->signal->exec_update_lock);
15578cdb878dSChristopher Yeoh if (err)
15588cdb878dSChristopher Yeoh return ERR_PTR(err);
15598cdb878dSChristopher Yeoh
15608cdb878dSChristopher Yeoh mm = get_task_mm(task);
15618cdb878dSChristopher Yeoh if (mm && mm != current->mm &&
15628cdb878dSChristopher Yeoh !ptrace_may_access(task, mode)) {
15638cdb878dSChristopher Yeoh mmput(mm);
15648cdb878dSChristopher Yeoh mm = ERR_PTR(-EACCES);
15658cdb878dSChristopher Yeoh }
1566f7cfd871SEric W. Biederman up_read(&task->signal->exec_update_lock);
15678cdb878dSChristopher Yeoh
15688cdb878dSChristopher Yeoh return mm;
15698cdb878dSChristopher Yeoh }
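/*
 * Illustrative sketch of an mm_access() caller: the return value can be
 * NULL (no mm), an ERR_PTR() (no permission or interrupted), or a
 * referenced mm that must be released with mmput(). The helper name
 * example_access_mm() and the chosen ptrace mode are hypothetical.
 */
static __maybe_unused int example_access_mm(struct task_struct *task)
{
	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_REALCREDS);

	if (IS_ERR_OR_NULL(mm))
		return mm ? PTR_ERR(mm) : -ESRCH;

	/* ... inspect mm here ... */
	mmput(mm);
	return 0;
}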
15708cdb878dSChristopher Yeoh
157157b59c4aSOleg Nesterov static void complete_vfork_done(struct task_struct *tsk)
1572c415c3b4SOleg Nesterov {
1573d68b46feSOleg Nesterov struct completion *vfork;
1574c415c3b4SOleg Nesterov
1575d68b46feSOleg Nesterov task_lock(tsk);
1576d68b46feSOleg Nesterov vfork = tsk->vfork_done;
1577d68b46feSOleg Nesterov if (likely(vfork)) {
1578c415c3b4SOleg Nesterov tsk->vfork_done = NULL;
1579d68b46feSOleg Nesterov complete(vfork);
1580d68b46feSOleg Nesterov }
1581d68b46feSOleg Nesterov task_unlock(tsk);
1582d68b46feSOleg Nesterov }
1583d68b46feSOleg Nesterov
1584d68b46feSOleg Nesterov static int wait_for_vfork_done(struct task_struct *child,
1585d68b46feSOleg Nesterov struct completion *vfork)
1586d68b46feSOleg Nesterov {
1587f5d39b02SPeter Zijlstra unsigned int state = TASK_UNINTERRUPTIBLE|TASK_KILLABLE|TASK_FREEZABLE;
1588d68b46feSOleg Nesterov int killed;
1589d68b46feSOleg Nesterov
159076f969e8SRoman Gushchin cgroup_enter_frozen();
1591f5d39b02SPeter Zijlstra killed = wait_for_completion_state(vfork, state);
159276f969e8SRoman Gushchin cgroup_leave_frozen(false);
1593d68b46feSOleg Nesterov
1594d68b46feSOleg Nesterov if (killed) {
1595d68b46feSOleg Nesterov task_lock(child);
1596d68b46feSOleg Nesterov child->vfork_done = NULL;
1597d68b46feSOleg Nesterov task_unlock(child);
1598d68b46feSOleg Nesterov }
1599d68b46feSOleg Nesterov
1600d68b46feSOleg Nesterov put_task_struct(child);
1601d68b46feSOleg Nesterov return killed;
1602c415c3b4SOleg Nesterov }
1603c415c3b4SOleg Nesterov
16041da177e4SLinus Torvalds /* Please note the differences between mmput and mm_release.
16051da177e4SLinus Torvalds * mmput is called whenever we stop holding onto a mm_struct,
16061da177e4SLinus Torvalds * whether on error or success.
16071da177e4SLinus Torvalds *
16081da177e4SLinus Torvalds * mm_release is called after a mm_struct has been removed
16091da177e4SLinus Torvalds * from the current process.
16101da177e4SLinus Torvalds *
16111da177e4SLinus Torvalds * This difference is important for error handling, when we
16121da177e4SLinus Torvalds * only half set up a mm_struct for a new process and need to restore
16131da177e4SLinus Torvalds * the old one. Because we mmput the new mm_struct before
16141da177e4SLinus Torvalds * restoring the old one. . .
16151da177e4SLinus Torvalds * Eric Biederman 10 January 1998
16161da177e4SLinus Torvalds */
16174610ba7aSThomas Gleixner static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
16181da177e4SLinus Torvalds {
16190326f5a9SSrikar Dronamraju uprobe_free_utask(tsk);
16200326f5a9SSrikar Dronamraju
16211da177e4SLinus Torvalds /* Get rid of any cached register state */
16221da177e4SLinus Torvalds deactivate_mm(tsk, mm);
16231da177e4SLinus Torvalds
1624fec1d011SRoland McGrath /*
1625735f2770SMichal Hocko * Signal userspace if we're not exiting with a core dump
1626735f2770SMichal Hocko * because we want to leave the value intact for debugging
1627735f2770SMichal Hocko * purposes.
1628fec1d011SRoland McGrath */
16299c8a8228SEric Dumazet if (tsk->clear_child_tid) {
163092307383SEric W. Biederman if (atomic_read(&mm->mm_users) > 1) {
16311da177e4SLinus Torvalds /*
16321da177e4SLinus Torvalds * We don't check the error code - if userspace has
16331da177e4SLinus Torvalds * not set up a proper pointer then tough luck.
16341da177e4SLinus Torvalds */
16359c8a8228SEric Dumazet put_user(0, tsk->clear_child_tid);
16362de0db99SDominik Brodowski do_futex(tsk->clear_child_tid, FUTEX_WAKE,
16372de0db99SDominik Brodowski 1, NULL, NULL, 0, 0);
16389c8a8228SEric Dumazet }
16399c8a8228SEric Dumazet tsk->clear_child_tid = NULL;
16401da177e4SLinus Torvalds }
1641f7505d64SKonstantin Khlebnikov
1642f7505d64SKonstantin Khlebnikov /*
1643f7505d64SKonstantin Khlebnikov * All done, finally we can wake up parent and return this mm to him.
1644f7505d64SKonstantin Khlebnikov * Also kthread_stop() uses this completion for synchronization.
1645f7505d64SKonstantin Khlebnikov */
1646f7505d64SKonstantin Khlebnikov if (tsk->vfork_done)
1647f7505d64SKonstantin Khlebnikov complete_vfork_done(tsk);
16481da177e4SLinus Torvalds }
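/*
 * Note: the clear_child_tid store and FUTEX_WAKE above implement the
 * CLONE_CHILD_CLEARTID contract; C libraries typically point
 * clear_child_tid at the exiting thread's tid field so that
 * pthread_join() can futex-wait on it.
 */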
16491da177e4SLinus Torvalds
16504610ba7aSThomas Gleixner void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
16514610ba7aSThomas Gleixner {
1652150d7158SThomas Gleixner futex_exit_release(tsk);
16534610ba7aSThomas Gleixner mm_release(tsk, mm);
16544610ba7aSThomas Gleixner }
16554610ba7aSThomas Gleixner
16564610ba7aSThomas Gleixner void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
16574610ba7aSThomas Gleixner {
1658150d7158SThomas Gleixner futex_exec_release(tsk);
16594610ba7aSThomas Gleixner mm_release(tsk, mm);
16604610ba7aSThomas Gleixner }
16614610ba7aSThomas Gleixner
166213585fa0SNadav Amit /**
166313585fa0SNadav Amit * dup_mm() - duplicates an existing mm structure
166413585fa0SNadav Amit * @tsk: the task_struct with which the new mm will be associated.
166513585fa0SNadav Amit * @oldmm: the mm to duplicate.
166613585fa0SNadav Amit *
166713585fa0SNadav Amit * Allocates a new mm structure and duplicates the provided @oldmm structure
166813585fa0SNadav Amit * content into it.
166913585fa0SNadav Amit *
167013585fa0SNadav Amit * Return: the duplicated mm or NULL on failure.
1671a0a7ec30SJANAK DESAI */
167213585fa0SNadav Amit static struct mm_struct *dup_mm(struct task_struct *tsk,
167313585fa0SNadav Amit struct mm_struct *oldmm)
1674a0a7ec30SJANAK DESAI {
167513585fa0SNadav Amit struct mm_struct *mm;
1676a0a7ec30SJANAK DESAI int err;
1677a0a7ec30SJANAK DESAI
1678a0a7ec30SJANAK DESAI mm = allocate_mm();
1679a0a7ec30SJANAK DESAI if (!mm)
1680a0a7ec30SJANAK DESAI goto fail_nomem;
1681a0a7ec30SJANAK DESAI
1682a0a7ec30SJANAK DESAI memcpy(mm, oldmm, sizeof(*mm));
1683a0a7ec30SJANAK DESAI
1684bfedb589SEric W. Biederman if (!mm_init(mm, tsk, mm->user_ns))
1685a0a7ec30SJANAK DESAI goto fail_nomem;
1686a0a7ec30SJANAK DESAI
1687a0a7ec30SJANAK DESAI err = dup_mmap(mm, oldmm);
1688a0a7ec30SJANAK DESAI if (err)
1689a0a7ec30SJANAK DESAI goto free_pt;
1690a0a7ec30SJANAK DESAI
1691a0a7ec30SJANAK DESAI mm->hiwater_rss = get_mm_rss(mm);
1692a0a7ec30SJANAK DESAI mm->hiwater_vm = mm->total_vm;
1693a0a7ec30SJANAK DESAI
1694801460d0SHiroshi Shimamoto if (mm->binfmt && !try_module_get(mm->binfmt->module))
1695801460d0SHiroshi Shimamoto goto free_pt;
1696801460d0SHiroshi Shimamoto
1697a0a7ec30SJANAK DESAI return mm;
1698a0a7ec30SJANAK DESAI
1699a0a7ec30SJANAK DESAI free_pt:
1700801460d0SHiroshi Shimamoto /* don't put binfmt in mmput, we haven't got module yet */
1701801460d0SHiroshi Shimamoto mm->binfmt = NULL;
1702c3f3ce04SAndrea Arcangeli mm_init_owner(mm, NULL);
1703a0a7ec30SJANAK DESAI mmput(mm);
1704a0a7ec30SJANAK DESAI
1705a0a7ec30SJANAK DESAI fail_nomem:
1706a0a7ec30SJANAK DESAI return NULL;
1707a0a7ec30SJANAK DESAI }
1708a0a7ec30SJANAK DESAI
17091da177e4SLinus Torvalds static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
17101da177e4SLinus Torvalds {
17111da177e4SLinus Torvalds struct mm_struct *mm, *oldmm;
17121da177e4SLinus Torvalds
17131da177e4SLinus Torvalds tsk->min_flt = tsk->maj_flt = 0;
17141da177e4SLinus Torvalds tsk->nvcsw = tsk->nivcsw = 0;
171517406b82SMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
171617406b82SMandeep Singh Baines tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
1717a2e51445SDmitry Vyukov tsk->last_switch_time = 0;
171817406b82SMandeep Singh Baines #endif
17191da177e4SLinus Torvalds
17201da177e4SLinus Torvalds tsk->mm = NULL;
17211da177e4SLinus Torvalds tsk->active_mm = NULL;
17221da177e4SLinus Torvalds
17231da177e4SLinus Torvalds /*
17241da177e4SLinus Torvalds * Are we cloning a kernel thread?
17251da177e4SLinus Torvalds *
17261da177e4SLinus Torvalds * We need to steal an active VM for that.
17271da177e4SLinus Torvalds */
17281da177e4SLinus Torvalds oldmm = current->mm;
17291da177e4SLinus Torvalds if (!oldmm)
17301da177e4SLinus Torvalds return 0;
17311da177e4SLinus Torvalds
17321da177e4SLinus Torvalds if (clone_flags & CLONE_VM) {
17333fce371bSVegard Nossum mmget(oldmm);
17341da177e4SLinus Torvalds mm = oldmm;
1735a6895399SRolf Eike Beer } else {
173613585fa0SNadav Amit mm = dup_mm(tsk, current->mm);
17371da177e4SLinus Torvalds if (!mm)
1738a6895399SRolf Eike Beer return -ENOMEM;
1739a6895399SRolf Eike Beer }
17401da177e4SLinus Torvalds
17411da177e4SLinus Torvalds tsk->mm = mm;
17421da177e4SLinus Torvalds tsk->active_mm = mm;
1743af7f588dSMathieu Desnoyers sched_mm_cid_fork(tsk);
17441da177e4SLinus Torvalds return 0;
17451da177e4SLinus Torvalds }
17461da177e4SLinus Torvalds
1747a39bc516SAlexey Dobriyan static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
17481da177e4SLinus Torvalds {
1749498052bbSAl Viro struct fs_struct *fs = current->fs;
17501da177e4SLinus Torvalds if (clone_flags & CLONE_FS) {
1751498052bbSAl Viro /* tsk->fs is already what we want */
17522a4419b5SNick Piggin spin_lock(&fs->lock);
1753498052bbSAl Viro if (fs->in_exec) {
17542a4419b5SNick Piggin spin_unlock(&fs->lock);
1755498052bbSAl Viro return -EAGAIN;
1756498052bbSAl Viro }
1757498052bbSAl Viro fs->users++;
17582a4419b5SNick Piggin spin_unlock(&fs->lock);
17591da177e4SLinus Torvalds return 0;
17601da177e4SLinus Torvalds }
1761498052bbSAl Viro tsk->fs = copy_fs_struct(fs);
17621da177e4SLinus Torvalds if (!tsk->fs)
17631da177e4SLinus Torvalds return -ENOMEM;
17641da177e4SLinus Torvalds return 0;
17651da177e4SLinus Torvalds }
17661da177e4SLinus Torvalds
176711f3f500SMike Christie static int copy_files(unsigned long clone_flags, struct task_struct *tsk,
176811f3f500SMike Christie int no_files)
1769a016f338SJANAK DESAI {
1770a016f338SJANAK DESAI struct files_struct *oldf, *newf;
1771a016f338SJANAK DESAI
1772a016f338SJANAK DESAI /*
1773a016f338SJANAK DESAI * A background process may not have any files ...
1774a016f338SJANAK DESAI */
1775a016f338SJANAK DESAI oldf = current->files;
1776a016f338SJANAK DESAI if (!oldf)
1777a8023f8bSAl Viro return 0;
1778a016f338SJANAK DESAI
177911f3f500SMike Christie if (no_files) {
178011f3f500SMike Christie tsk->files = NULL;
1781a8023f8bSAl Viro return 0;
178211f3f500SMike Christie }
178311f3f500SMike Christie
1784a016f338SJANAK DESAI if (clone_flags & CLONE_FILES) {
1785a016f338SJANAK DESAI atomic_inc(&oldf->count);
1786a8023f8bSAl Viro return 0;
1787a016f338SJANAK DESAI }
1788a016f338SJANAK DESAI
1789a8023f8bSAl Viro newf = dup_fd(oldf, NULL);
1790a8023f8bSAl Viro if (IS_ERR(newf))
1791a8023f8bSAl Viro return PTR_ERR(newf);
1792a016f338SJANAK DESAI
1793a016f338SJANAK DESAI tsk->files = newf;
1794a8023f8bSAl Viro return 0;
1795a016f338SJANAK DESAI }
1796a016f338SJANAK DESAI
1797a39bc516SAlexey Dobriyan static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
17981da177e4SLinus Torvalds {
17991da177e4SLinus Torvalds struct sighand_struct *sig;
18001da177e4SLinus Torvalds
180160348802SZhaolei if (clone_flags & CLONE_SIGHAND) {
1802d036bda7SElena Reshetova refcount_inc(¤t->sighand->count);
18031da177e4SLinus Torvalds return 0;
18041da177e4SLinus Torvalds }
18051da177e4SLinus Torvalds sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
18060c282b06SMadhuparna Bhowmik RCU_INIT_POINTER(tsk->sighand, sig);
18071da177e4SLinus Torvalds if (!sig)
18081da177e4SLinus Torvalds return -ENOMEM;
18099d7fb042SPeter Zijlstra
1810d036bda7SElena Reshetova refcount_set(&sig->count, 1);
181106e62a46SJann Horn spin_lock_irq(¤t->sighand->siglock);
18121da177e4SLinus Torvalds memcpy(sig->action, current->sighand->action, sizeof(sig->action));
181306e62a46SJann Horn spin_unlock_irq(¤t->sighand->siglock);
1814b612e5dfSChristian Brauner
1815b612e5dfSChristian Brauner /* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
1816b612e5dfSChristian Brauner if (clone_flags & CLONE_CLEAR_SIGHAND)
1817b612e5dfSChristian Brauner flush_signal_handlers(tsk, 0);
1818b612e5dfSChristian Brauner
18191da177e4SLinus Torvalds return 0;
18201da177e4SLinus Torvalds }
18211da177e4SLinus Torvalds
1822a7e5328aSOleg Nesterov void __cleanup_sighand(struct sighand_struct *sighand)
1823c81addc9SOleg Nesterov {
1824d036bda7SElena Reshetova if (refcount_dec_and_test(&sighand->count)) {
1825d80e731eSOleg Nesterov signalfd_cleanup(sighand);
1826392809b2SOleg Nesterov /*
18275f0d5a3aSPaul E. McKenney * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
1828392809b2SOleg Nesterov * without an RCU grace period, see __lock_task_sighand().
1829392809b2SOleg Nesterov */
1830c81addc9SOleg Nesterov kmem_cache_free(sighand_cachep, sighand);
1831c81addc9SOleg Nesterov }
1832d80e731eSOleg Nesterov }
1833c81addc9SOleg Nesterov
1834f06febc9SFrank Mayhar /*
1835f06febc9SFrank Mayhar * Initialize POSIX timer handling for a thread group.
1836f06febc9SFrank Mayhar */
1837f06febc9SFrank Mayhar static void posix_cpu_timers_init_group(struct signal_struct *sig)
1838f06febc9SFrank Mayhar {
18392b69942fSThomas Gleixner struct posix_cputimers *pct = &sig->posix_cputimers;
184078d7d407SJiri Slaby unsigned long cpu_limit;
184178d7d407SJiri Slaby
1842316c1608SJason Low cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
18433a245c0fSThomas Gleixner posix_cputimers_group_init(pct, cpu_limit);
18446279a751SOleg Nesterov }
18456279a751SOleg Nesterov
1846a39bc516SAlexey Dobriyan static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
18471da177e4SLinus Torvalds {
18481da177e4SLinus Torvalds struct signal_struct *sig;
18491da177e4SLinus Torvalds
18504ab6c083SOleg Nesterov if (clone_flags & CLONE_THREAD)
1851490dea45SPeter Zijlstra return 0;
18526279a751SOleg Nesterov
1853a56704efSVeaceslav Falico sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
18541da177e4SLinus Torvalds tsk->signal = sig;
18551da177e4SLinus Torvalds if (!sig)
18561da177e4SLinus Torvalds return -ENOMEM;
18571da177e4SLinus Torvalds
1858b3ac022cSOleg Nesterov sig->nr_threads = 1;
1859d80f7d7bSEric W. Biederman sig->quick_threads = 1;
18601da177e4SLinus Torvalds atomic_set(&sig->live, 1);
186160d4de3fSElena Reshetova refcount_set(&sig->sigcnt, 1);
18620c740d0aSOleg Nesterov
18630c740d0aSOleg Nesterov /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
18640c740d0aSOleg Nesterov sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
18650c740d0aSOleg Nesterov tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
18660c740d0aSOleg Nesterov
18671da177e4SLinus Torvalds init_waitqueue_head(&sig->wait_chldexit);
1868db51aeccSOleg Nesterov sig->curr_target = tsk;
18691da177e4SLinus Torvalds init_sigpending(&sig->shared_pending);
1870c3ad2c3bSEric W. Biederman INIT_HLIST_HEAD(&sig->multiprocess);
1871e78c3496SRik van Riel seqlock_init(&sig->stats_lock);
18729d7fb042SPeter Zijlstra prev_cputime_init(&sig->prev_cputime);
18731da177e4SLinus Torvalds
1874baa73d9eSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS
1875b18b6a9cSNicolas Pitre INIT_LIST_HEAD(&sig->posix_timers);
1876c9cb2e3dSThomas Gleixner hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
18771da177e4SLinus Torvalds sig->real_timer.function = it_real_fn;
1878baa73d9eSNicolas Pitre #endif
18791da177e4SLinus Torvalds
18801da177e4SLinus Torvalds task_lock(current->group_leader);
18811da177e4SLinus Torvalds memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
18821da177e4SLinus Torvalds task_unlock(current->group_leader);
18831da177e4SLinus Torvalds
18846279a751SOleg Nesterov posix_cpu_timers_init_group(sig);
18856279a751SOleg Nesterov
1886522ed776SMiloslav Trmac tty_audit_fork(sig);
18875091faa4SMike Galbraith sched_autogroup_fork(sig);
1888522ed776SMiloslav Trmac
1889a63d83f4SDavid Rientjes sig->oom_score_adj = current->signal->oom_score_adj;
1890dabb16f6SMandeep Singh Baines sig->oom_score_adj_min = current->signal->oom_score_adj_min;
189128b83c51SKOSAKI Motohiro
18929b1bf12dSKOSAKI Motohiro mutex_init(&sig->cred_guard_mutex);
1893f7cfd871SEric W. Biederman init_rwsem(&sig->exec_update_lock);
18949b1bf12dSKOSAKI Motohiro
18951da177e4SLinus Torvalds return 0;
18961da177e4SLinus Torvalds }
18971da177e4SLinus Torvalds
1898dbd95212SKees Cook static void copy_seccomp(struct task_struct *p)
1899dbd95212SKees Cook {
1900dbd95212SKees Cook #ifdef CONFIG_SECCOMP
1901dbd95212SKees Cook /*
1902dbd95212SKees Cook * Must be called with sighand->lock held, which is common to
1903dbd95212SKees Cook * all threads in the group. Holding cred_guard_mutex is not
1904dbd95212SKees Cook * needed because this new task is not yet running and cannot
1905dbd95212SKees Cook * be racing exec.
1906dbd95212SKees Cook */
190769f6a34bSGuenter Roeck assert_spin_locked(¤t->sighand->siglock);
1908dbd95212SKees Cook
1909dbd95212SKees Cook /* Ref-count the new filter user, and assign it. */
1910dbd95212SKees Cook get_seccomp_filter(current);
1911dbd95212SKees Cook p->seccomp = current->seccomp;
1912dbd95212SKees Cook
1913dbd95212SKees Cook /*
1914dbd95212SKees Cook * Explicitly enable no_new_privs here in case it got set
1915dbd95212SKees Cook * between the task_struct being duplicated and holding the
1916dbd95212SKees Cook * sighand lock. The seccomp state and nnp must be in sync.
1917dbd95212SKees Cook */
1918dbd95212SKees Cook if (task_no_new_privs(current))
1919dbd95212SKees Cook task_set_no_new_privs(p);
1920dbd95212SKees Cook
1921dbd95212SKees Cook /*
1922dbd95212SKees Cook * If the parent gained a seccomp mode after the thread flags were
1923dbd95212SKees Cook * copied but before we took the sighand lock, we have to manually
1924dbd95212SKees Cook * enable the seccomp thread flag here.
1925dbd95212SKees Cook */
1926dbd95212SKees Cook if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
192723d67a54SGabriel Krisman Bertazi set_task_syscall_work(p, SECCOMP);
1928dbd95212SKees Cook #endif
1929dbd95212SKees Cook }
1930dbd95212SKees Cook
193117da2bd9SHeiko Carstens SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
19321da177e4SLinus Torvalds {
19331da177e4SLinus Torvalds current->clear_child_tid = tidptr;
19341da177e4SLinus Torvalds
1935b488893aSPavel Emelyanov return task_pid_vnr(current);
19361da177e4SLinus Torvalds }
19371da177e4SLinus Torvalds
1938a39bc516SAlexey Dobriyan static void rt_mutex_init_task(struct task_struct *p)
193923f78d4aSIngo Molnar {
19401d615482SThomas Gleixner raw_spin_lock_init(&p->pi_lock);
1941e29e175bSZilvinas Valinskas #ifdef CONFIG_RT_MUTEXES
1942a23ba907SDavidlohr Bueso p->pi_waiters = RB_ROOT_CACHED;
1943e96a7705SXunlei Pang p->pi_top_task = NULL;
194423f78d4aSIngo Molnar p->pi_blocked_on = NULL;
194523f78d4aSIngo Molnar #endif
194623f78d4aSIngo Molnar }
194723f78d4aSIngo Molnar
19482c470475SEric W. Biederman static inline void init_task_pid_links(struct task_struct *task)
19492c470475SEric W. Biederman {
19502c470475SEric W. Biederman enum pid_type type;
19512c470475SEric W. Biederman
195296e1e984SAlexander Guril for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
19532c470475SEric W. Biederman INIT_HLIST_NODE(&task->pid_links[type]);
19542c470475SEric W. Biederman }
19552c470475SEric W. Biederman
195681907739SOleg Nesterov static inline void
195781907739SOleg Nesterov init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
195881907739SOleg Nesterov {
19592c470475SEric W. Biederman if (type == PIDTYPE_PID)
19602c470475SEric W. Biederman task->thread_pid = pid;
19612c470475SEric W. Biederman else
19622c470475SEric W. Biederman task->signal->pids[type] = pid;
196381907739SOleg Nesterov }
196481907739SOleg Nesterov
19656bfbaa51SIngo Molnar static inline void rcu_copy_process(struct task_struct *p)
19666bfbaa51SIngo Molnar {
19676bfbaa51SIngo Molnar #ifdef CONFIG_PREEMPT_RCU
19686bfbaa51SIngo Molnar p->rcu_read_lock_nesting = 0;
19696bfbaa51SIngo Molnar p->rcu_read_unlock_special.s = 0;
19706bfbaa51SIngo Molnar p->rcu_blocked_node = NULL;
19716bfbaa51SIngo Molnar INIT_LIST_HEAD(&p->rcu_node_entry);
19726bfbaa51SIngo Molnar #endif /* #ifdef CONFIG_PREEMPT_RCU */
19736bfbaa51SIngo Molnar #ifdef CONFIG_TASKS_RCU
19746bfbaa51SIngo Molnar p->rcu_tasks_holdout = false;
19756bfbaa51SIngo Molnar INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
19766bfbaa51SIngo Molnar p->rcu_tasks_idle_cpu = -1;
19777679283eSPaul E. McKenney INIT_LIST_HEAD(&p->rcu_tasks_exit_list);
19786bfbaa51SIngo Molnar #endif /* #ifdef CONFIG_TASKS_RCU */
1979d5f177d3SPaul E. McKenney #ifdef CONFIG_TASKS_TRACE_RCU
1980d5f177d3SPaul E. McKenney p->trc_reader_nesting = 0;
1981276c4104SPaul E. McKenney p->trc_reader_special.s = 0;
1982d5f177d3SPaul E. McKenney INIT_LIST_HEAD(&p->trc_holdout_list);
1983434c9eefSPaul E. McKenney INIT_LIST_HEAD(&p->trc_blkd_node);
1984d5f177d3SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
19856bfbaa51SIngo Molnar }
19866bfbaa51SIngo Molnar
19873695eae5SChristian Brauner struct pid *pidfd_pid(const struct file *file)
19883695eae5SChristian Brauner {
19893695eae5SChristian Brauner if (file->f_op == &pidfd_fops)
19903695eae5SChristian Brauner return file->private_data;
19913695eae5SChristian Brauner
19923695eae5SChristian Brauner return ERR_PTR(-EBADF);
19933695eae5SChristian Brauner }
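/*
 * Illustrative sketch of how a pidfd-taking syscall can resolve its fd
 * argument via pidfd_pid(); similar in spirit to what callers like
 * pidfd_send_signal() do. example_resolve_pidfd() is a hypothetical
 * helper, not part of fork.c.
 */
static __maybe_unused struct pid *example_resolve_pidfd(int fd)
{
	struct fd f = fdget(fd);
	struct pid *pid;

	if (!f.file)
		return ERR_PTR(-EBADF);

	pid = pidfd_pid(f.file);
	if (!IS_ERR(pid))
		get_pid(pid);	/* keep the pid alive after fdput() */

	fdput(f);
	return pid;
}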
19943695eae5SChristian Brauner
1995b3e58382SChristian Brauner static int pidfd_release(struct inode *inode, struct file *file)
1996b3e58382SChristian Brauner {
1997b3e58382SChristian Brauner struct pid *pid = file->private_data;
1998b3e58382SChristian Brauner
1999b3e58382SChristian Brauner file->private_data = NULL;
2000b3e58382SChristian Brauner put_pid(pid);
2001b3e58382SChristian Brauner return 0;
2002b3e58382SChristian Brauner }
2003b3e58382SChristian Brauner
2004b3e58382SChristian Brauner #ifdef CONFIG_PROC_FS
200515d42eb2SChristian Kellner /**
200615d42eb2SChristian Kellner * pidfd_show_fdinfo - print information about a pidfd
200715d42eb2SChristian Kellner * @m: proc fdinfo file
200815d42eb2SChristian Kellner * @f: file referencing a pidfd
200915d42eb2SChristian Kellner *
201015d42eb2SChristian Kellner * Pid:
201115d42eb2SChristian Kellner * This function will print the pid that a given pidfd refers to in the
201215d42eb2SChristian Kellner * pid namespace of the procfs instance.
201315d42eb2SChristian Kellner * If the pid namespace of the process is not a descendant of the pid
201415d42eb2SChristian Kellner * namespace of the procfs instance 0 will be shown as its pid. This is
201515d42eb2SChristian Kellner * similar to calling getppid() on a process whose parent is outside of
201615d42eb2SChristian Kellner * its pid namespace.
201715d42eb2SChristian Kellner *
201815d42eb2SChristian Kellner * NSpid:
201915d42eb2SChristian Kellner * If pid namespaces are supported then this function will also print
202015d42eb2SChristian Kellner * the pid of a given pidfd refers to for all descendant pid namespaces
202115d42eb2SChristian Kellner * starting from the current pid namespace of the instance, i.e. the
202215d42eb2SChristian Kellner * Pid field and the first entry in the NSpid field will be identical.
202315d42eb2SChristian Kellner * If the pid namespace of the process is not a descendant of the pid
202415d42eb2SChristian Kellner * namespace of the procfs instance 0 will be shown as its first NSpid
202515d42eb2SChristian Kellner * entry and no others will be shown.
202615d42eb2SChristian Kellner * Note that this differs from the Pid and NSpid fields in
202715d42eb2SChristian Kellner * /proc/<pid>/status where Pid and NSpid are always shown relative to
202815d42eb2SChristian Kellner * the pid namespace of the procfs instance. The difference becomes
202915d42eb2SChristian Kellner * obvious when sending around a pidfd between pid namespaces from a
2030a8ca6b13SXiaofeng Cao * different branch of the tree, i.e. where no ancestral relation is
203115d42eb2SChristian Kellner * present between the pid namespaces:
203215d42eb2SChristian Kellner * - create two new pid namespaces ns1 and ns2 in the initial pid
203315d42eb2SChristian Kellner * namespace (also take care to create new mount namespaces in the
203415d42eb2SChristian Kellner * new pid namespace and mount procfs)
203515d42eb2SChristian Kellner * - create a process with a pidfd in ns1
203615d42eb2SChristian Kellner * - send pidfd from ns1 to ns2
203715d42eb2SChristian Kellner * - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid
203815d42eb2SChristian Kellner * have exactly one entry, which is 0
203915d42eb2SChristian Kellner */
2040b3e58382SChristian Brauner static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
2041b3e58382SChristian Brauner {
2042b3e58382SChristian Brauner struct pid *pid = f->private_data;
20433d6d8da4SChristian Brauner struct pid_namespace *ns;
20443d6d8da4SChristian Brauner pid_t nr = -1;
2045b3e58382SChristian Brauner
20463d6d8da4SChristian Brauner if (likely(pid_has_task(pid, PIDTYPE_PID))) {
20479d78edeaSAlexey Gladkov ns = proc_pid_ns(file_inode(m->file)->i_sb);
20483d6d8da4SChristian Brauner nr = pid_nr_ns(pid, ns);
20493d6d8da4SChristian Brauner }
20503d6d8da4SChristian Brauner
20513d6d8da4SChristian Brauner seq_put_decimal_ll(m, "Pid:\t", nr);
205215d42eb2SChristian Kellner
205315d42eb2SChristian Kellner #ifdef CONFIG_PID_NS
20543d6d8da4SChristian Brauner seq_put_decimal_ll(m, "\nNSpid:\t", nr);
20553d6d8da4SChristian Brauner if (nr > 0) {
205615d42eb2SChristian Kellner int i;
205715d42eb2SChristian Kellner
205815d42eb2SChristian Kellner /* If nr is non-zero it means that 'pid' is valid and that
205915d42eb2SChristian Kellner * ns, i.e. the pid namespace associated with the procfs
206015d42eb2SChristian Kellner * instance, is in the pid namespace hierarchy of pid.
206115d42eb2SChristian Kellner * Start at one below the already printed level.
206215d42eb2SChristian Kellner */
206315d42eb2SChristian Kellner for (i = ns->level + 1; i <= pid->level; i++)
20643d6d8da4SChristian Brauner seq_put_decimal_ll(m, "\t", pid->numbers[i].nr);
206515d42eb2SChristian Kellner }
206615d42eb2SChristian Kellner #endif
2067b3e58382SChristian Brauner seq_putc(m, '\n');
2068b3e58382SChristian Brauner }
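/*
 * Example fdinfo output (values made up): if the procfs instance lives in
 * the initial pid namespace and the pidfd's target sits one namespace
 * level below it, the pidfd-specific part of /proc/<pid>/fdinfo/<fd>
 * reads:
 *
 *	Pid:	4366
 *	NSpid:	4366	12
 *
 * i.e. the first NSpid entry always equals Pid, followed by the pid in
 * each descendant namespace.
 */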
2069b3e58382SChristian Brauner #endif
2070b3e58382SChristian Brauner
2071b53b0b9dSJoel Fernandes (Google) /*
2072b53b0b9dSJoel Fernandes (Google) * Poll support for process exit notification.
2073b53b0b9dSJoel Fernandes (Google) */
20749e77716aSLuc Van Oostenryck static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
2075b53b0b9dSJoel Fernandes (Google) {
2076b53b0b9dSJoel Fernandes (Google) struct pid *pid = file->private_data;
20779e77716aSLuc Van Oostenryck __poll_t poll_flags = 0;
2078b53b0b9dSJoel Fernandes (Google)
2079b53b0b9dSJoel Fernandes (Google) poll_wait(file, &pid->wait_pidfd, pts);
2080b53b0b9dSJoel Fernandes (Google)
2081b53b0b9dSJoel Fernandes (Google) /*
2082b53b0b9dSJoel Fernandes (Google) * Inform pollers only when the whole thread group exits.
2083b53b0b9dSJoel Fernandes (Google) * If the thread group leader exits before all other threads in the
2084b53b0b9dSJoel Fernandes (Google) * group, then poll(2) should block, similar to the wait(2) family.
2085b53b0b9dSJoel Fernandes (Google) */
208638fd525aSEric W. Biederman if (thread_group_exited(pid))
20879e77716aSLuc Van Oostenryck poll_flags = EPOLLIN | EPOLLRDNORM;
2088b53b0b9dSJoel Fernandes (Google)
2089b53b0b9dSJoel Fernandes (Google) return poll_flags;
2090b53b0b9dSJoel Fernandes (Google) }
2091b53b0b9dSJoel Fernandes (Google)
2092b3e58382SChristian Brauner const struct file_operations pidfd_fops = {
2093b3e58382SChristian Brauner .release = pidfd_release,
2094b53b0b9dSJoel Fernandes (Google) .poll = pidfd_poll,
2095b3e58382SChristian Brauner #ifdef CONFIG_PROC_FS
2096b3e58382SChristian Brauner .show_fdinfo = pidfd_show_fdinfo,
2097b3e58382SChristian Brauner #endif
2098b3e58382SChristian Brauner };
2099b3e58382SChristian Brauner
21006ae930d9SChristian Brauner /**
21016ae930d9SChristian Brauner * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
21026ae930d9SChristian Brauner * @pid: the struct pid for which to create a pidfd
21036ae930d9SChristian Brauner * @flags: flags of the new @pidfd
21046ae930d9SChristian Brauner * @ret: where to return the pidfd file
21056ae930d9SChristian Brauner *
21066ae930d9SChristian Brauner * Allocate a new file that stashes @pid and reserve a new pidfd number in the
21076ae930d9SChristian Brauner * caller's file descriptor table. The pidfd is reserved but not installed yet.
21086ae930d9SChristian Brauner *
21096ae930d9SChristian Brauner * The helper doesn't perform checks on @pid which makes it useful for pidfds
21106ae930d9SChristian Brauner * created via CLONE_PIDFD where @pid has no task attached when the pidfd and
21116ae930d9SChristian Brauner * pidfd file are prepared.
21126ae930d9SChristian Brauner *
21136ae930d9SChristian Brauner * If this function returns successfully, the caller is responsible for either
21146ae930d9SChristian Brauner * calling fd_install(), passing the returned pidfd and pidfd file as arguments,
21156ae930d9SChristian Brauner * in order to install the pidfd into its file descriptor table, or for calling
21166ae930d9SChristian Brauner * put_unused_fd() and fput() on the returned pidfd and pidfd file,
21176ae930d9SChristian Brauner * respectively.
21186ae930d9SChristian Brauner *
21196ae930d9SChristian Brauner * This function is useful when a pidfd must already be reserved but there
21206ae930d9SChristian Brauner * might still be points of failure afterwards and the caller wants to ensure
21216ae930d9SChristian Brauner * that no pidfd is leaked into its file descriptor table.
21226ae930d9SChristian Brauner *
21236ae930d9SChristian Brauner * Return: On success, a reserved pidfd is returned from the function and a new
21246ae930d9SChristian Brauner * pidfd file is returned in the last argument to the function. On
21256ae930d9SChristian Brauner * error, a negative error code is returned from the function and the
21266ae930d9SChristian Brauner * last argument remains unchanged.
21276ae930d9SChristian Brauner */
21286ae930d9SChristian Brauner static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
21296ae930d9SChristian Brauner {
21306ae930d9SChristian Brauner int pidfd;
21316ae930d9SChristian Brauner struct file *pidfd_file;
21326ae930d9SChristian Brauner
21336ae930d9SChristian Brauner if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC))
21346ae930d9SChristian Brauner return -EINVAL;
21356ae930d9SChristian Brauner
21366ae930d9SChristian Brauner pidfd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
21376ae930d9SChristian Brauner if (pidfd < 0)
21386ae930d9SChristian Brauner return pidfd;
21396ae930d9SChristian Brauner
21406ae930d9SChristian Brauner pidfd_file = anon_inode_getfile("[pidfd]", &pidfd_fops, pid,
21416ae930d9SChristian Brauner flags | O_RDWR | O_CLOEXEC);
21426ae930d9SChristian Brauner if (IS_ERR(pidfd_file)) {
21436ae930d9SChristian Brauner put_unused_fd(pidfd);
21446ae930d9SChristian Brauner return PTR_ERR(pidfd_file);
21456ae930d9SChristian Brauner }
21466ae930d9SChristian Brauner get_pid(pid); /* held by pidfd_file now */
21476ae930d9SChristian Brauner *ret = pidfd_file;
21486ae930d9SChristian Brauner return pidfd;
21496ae930d9SChristian Brauner }
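/*
 * Editorial example (not part of fork.c): a sketch of the caller contract
 * described in the kernel-doc above. "do_more_setup()" is a hypothetical step
 * that may still fail after the pidfd has been reserved; compare the
 * CLONE_PIDFD handling in copy_process() below.
 *
 *      struct file *pidfd_file;
 *      int pidfd, err;
 *
 *      pidfd = __pidfd_prepare(pid, O_CLOEXEC, &pidfd_file);
 *      if (pidfd < 0)
 *              return pidfd;
 *
 *      err = do_more_setup();                  // hypothetical failure point
 *      if (err) {
 *              put_unused_fd(pidfd);           // give back the reserved fd number
 *              fput(pidfd_file);               // drops the file and its pid reference
 *              return err;
 *      }
 *
 *      fd_install(pidfd, pidfd_file);          // pidfd becomes visible to userspace
 *      return pidfd;
 */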
21506ae930d9SChristian Brauner
21516ae930d9SChristian Brauner /**
21526ae930d9SChristian Brauner * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
21536ae930d9SChristian Brauner * @pid: the struct pid for which to create a pidfd
21546ae930d9SChristian Brauner * @flags: flags of the new @pidfd
21556ae930d9SChristian Brauner * @ret: where to return the pidfd file
21566ae930d9SChristian Brauner *
21576ae930d9SChristian Brauner * Allocate a new file that stashes @pid and reserve a new pidfd number in the
21586ae930d9SChristian Brauner * caller's file descriptor table. The pidfd is reserved but not installed yet.
21596ae930d9SChristian Brauner *
21606ae930d9SChristian Brauner * The helper verifies that @pid is used as a thread group leader.
21616ae930d9SChristian Brauner *
21626ae930d9SChristian Brauner * If this function returns successfully, the caller is responsible for either
21636ae930d9SChristian Brauner * calling fd_install(), passing the returned pidfd and pidfd file as arguments,
21646ae930d9SChristian Brauner * in order to install the pidfd into its file descriptor table, or for calling
21656ae930d9SChristian Brauner * put_unused_fd() and fput() on the returned pidfd and pidfd file,
21666ae930d9SChristian Brauner * respectively.
21676ae930d9SChristian Brauner *
21686ae930d9SChristian Brauner * This function is useful when a pidfd must already be reserved but there
21696ae930d9SChristian Brauner * might still be points of failure afterwards and the caller wants to ensure
21706ae930d9SChristian Brauner * that no pidfd is leaked into its file descriptor table.
21716ae930d9SChristian Brauner *
21726ae930d9SChristian Brauner * Return: On success, a reserved pidfd is returned from the function and a new
21736ae930d9SChristian Brauner * pidfd file is returned in the last argument to the function. On
21746ae930d9SChristian Brauner * error, a negative error code is returned from the function and the
21756ae930d9SChristian Brauner * last argument remains unchanged.
21766ae930d9SChristian Brauner */
21776ae930d9SChristian Brauner int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
21786ae930d9SChristian Brauner {
21796ae930d9SChristian Brauner if (!pid || !pid_has_task(pid, PIDTYPE_TGID))
21806ae930d9SChristian Brauner return -EINVAL;
21816ae930d9SChristian Brauner
21826ae930d9SChristian Brauner return __pidfd_prepare(pid, flags, ret);
21836ae930d9SChristian Brauner }
21846ae930d9SChristian Brauner
2185c3f3ce04SAndrea Arcangeli static void __delayed_free_task(struct rcu_head *rhp)
2186c3f3ce04SAndrea Arcangeli {
2187c3f3ce04SAndrea Arcangeli struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
2188c3f3ce04SAndrea Arcangeli
2189c3f3ce04SAndrea Arcangeli free_task(tsk);
2190c3f3ce04SAndrea Arcangeli }
2191c3f3ce04SAndrea Arcangeli
2192c3f3ce04SAndrea Arcangeli static __always_inline void delayed_free_task(struct task_struct *tsk)
2193c3f3ce04SAndrea Arcangeli {
2194c3f3ce04SAndrea Arcangeli if (IS_ENABLED(CONFIG_MEMCG))
2195c3f3ce04SAndrea Arcangeli call_rcu(&tsk->rcu, __delayed_free_task);
2196c3f3ce04SAndrea Arcangeli else
2197c3f3ce04SAndrea Arcangeli free_task(tsk);
2198c3f3ce04SAndrea Arcangeli }
2199c3f3ce04SAndrea Arcangeli
220067197a4fSSuren Baghdasaryan static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
220167197a4fSSuren Baghdasaryan {
220267197a4fSSuren Baghdasaryan /* Skip if kernel thread */
220367197a4fSSuren Baghdasaryan if (!tsk->mm)
220467197a4fSSuren Baghdasaryan return;
220567197a4fSSuren Baghdasaryan
220667197a4fSSuren Baghdasaryan /* Skip if spawning a thread or using vfork */
220767197a4fSSuren Baghdasaryan if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
220867197a4fSSuren Baghdasaryan return;
220967197a4fSSuren Baghdasaryan
221067197a4fSSuren Baghdasaryan /* We need to synchronize with __set_oom_adj */
221167197a4fSSuren Baghdasaryan mutex_lock(&oom_adj_mutex);
221267197a4fSSuren Baghdasaryan set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
221367197a4fSSuren Baghdasaryan /* Update the values in case they were changed after copy_signal */
221467197a4fSSuren Baghdasaryan tsk->signal->oom_score_adj = current->signal->oom_score_adj;
221567197a4fSSuren Baghdasaryan tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
221667197a4fSSuren Baghdasaryan mutex_unlock(&oom_adj_mutex);
221767197a4fSSuren Baghdasaryan }
221867197a4fSSuren Baghdasaryan
221979257534SDaniel Bristot de Oliveira #ifdef CONFIG_RV
222079257534SDaniel Bristot de Oliveira static void rv_task_fork(struct task_struct *p)
222179257534SDaniel Bristot de Oliveira {
222279257534SDaniel Bristot de Oliveira int i;
222379257534SDaniel Bristot de Oliveira
222479257534SDaniel Bristot de Oliveira for (i = 0; i < RV_PER_TASK_MONITORS; i++)
222579257534SDaniel Bristot de Oliveira p->rv[i].da_mon.monitoring = false;
222679257534SDaniel Bristot de Oliveira }
222779257534SDaniel Bristot de Oliveira #else
222879257534SDaniel Bristot de Oliveira #define rv_task_fork(p) do {} while (0)
222979257534SDaniel Bristot de Oliveira #endif
223079257534SDaniel Bristot de Oliveira
2231f06febc9SFrank Mayhar /*
22321da177e4SLinus Torvalds * This creates a new process as a copy of the old one,
22331da177e4SLinus Torvalds * but does not actually start it yet.
22341da177e4SLinus Torvalds *
22351da177e4SLinus Torvalds * It copies the registers, and all the appropriate
22361da177e4SLinus Torvalds * parts of the process environment (as per the clone
22371da177e4SLinus Torvalds * flags). The actual kick-off is left to the caller.
22381da177e4SLinus Torvalds */
223989c8e98dSMike Christie __latent_entropy struct task_struct *copy_process(
224009a05394SRoland McGrath struct pid *pid,
22413033f14aSJosh Triplett int trace,
22427f192e3cSChristian Brauner int node,
22437f192e3cSChristian Brauner struct kernel_clone_args *args)
22441da177e4SLinus Torvalds {
2245b3e58382SChristian Brauner int pidfd = -1, retval;
2246a24efe62SMariusz Kozlowski struct task_struct *p;
2247c3ad2c3bSEric W. Biederman struct multiprocess_signals delayed;
22486fd2fe49SAl Viro struct file *pidfile = NULL;
2249c5febea0SEric W. Biederman const u64 clone_flags = args->flags;
2250769071acSAndrei Vagin struct nsproxy *nsp = current->nsproxy;
22511da177e4SLinus Torvalds
2252667b6094SMarcos Paulo de Souza /*
2253667b6094SMarcos Paulo de Souza * Don't allow sharing the root directory with processes in a different
2254667b6094SMarcos Paulo de Souza * namespace
2255667b6094SMarcos Paulo de Souza */
22561da177e4SLinus Torvalds if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
22571da177e4SLinus Torvalds return ERR_PTR(-EINVAL);
22581da177e4SLinus Torvalds
2259e66eded8SEric W. Biederman if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
2260e66eded8SEric W. Biederman return ERR_PTR(-EINVAL);
2261e66eded8SEric W. Biederman
22621da177e4SLinus Torvalds /*
22631da177e4SLinus Torvalds * Thread groups must share signals as well, and detached threads
22641da177e4SLinus Torvalds * can only be started up within the thread group.
22651da177e4SLinus Torvalds */
22661da177e4SLinus Torvalds if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
22671da177e4SLinus Torvalds return ERR_PTR(-EINVAL);
22681da177e4SLinus Torvalds
22691da177e4SLinus Torvalds /*
22701da177e4SLinus Torvalds * Shared signal handlers imply shared VM. By way of the above,
22711da177e4SLinus Torvalds * thread groups also imply shared VM. Blocking this case allows
22721da177e4SLinus Torvalds * for various simplifications in other code.
22731da177e4SLinus Torvalds */
22741da177e4SLinus Torvalds if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
22751da177e4SLinus Torvalds return ERR_PTR(-EINVAL);
22761da177e4SLinus Torvalds
2277123be07bSSukadev Bhattiprolu /*
2278123be07bSSukadev Bhattiprolu * Siblings of global init remain as zombies on exit since they are
2279123be07bSSukadev Bhattiprolu * not reaped by their parent (swapper). To solve this and to avoid
2280123be07bSSukadev Bhattiprolu * multi-rooted process trees, prevent global and container-inits
2281123be07bSSukadev Bhattiprolu * from creating siblings.
2282123be07bSSukadev Bhattiprolu */
2283123be07bSSukadev Bhattiprolu if ((clone_flags & CLONE_PARENT) &&
2284123be07bSSukadev Bhattiprolu current->signal->flags & SIGNAL_UNKILLABLE)
2285123be07bSSukadev Bhattiprolu return ERR_PTR(-EINVAL);
2286123be07bSSukadev Bhattiprolu
22878382fcacSEric W. Biederman /*
228840a0d32dSOleg Nesterov * If the new process will be in a different pid or user namespace,
2289faf00da5SEric W. Biederman * do not allow it to share a thread group with the forking task.
22908382fcacSEric W. Biederman */
2291faf00da5SEric W. Biederman if (clone_flags & CLONE_THREAD) {
229240a0d32dSOleg Nesterov if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
2293769071acSAndrei Vagin (task_active_pid_ns(current) != nsp->pid_ns_for_children))
2294769071acSAndrei Vagin return ERR_PTR(-EINVAL);
2295769071acSAndrei Vagin }
2296769071acSAndrei Vagin
2297b3e58382SChristian Brauner if (clone_flags & CLONE_PIDFD) {
2298b3e58382SChristian Brauner /*
2299b3e58382SChristian Brauner * - CLONE_DETACHED is blocked so that we can potentially
2300b3e58382SChristian Brauner * reuse it later for CLONE_PIDFD.
2301b3e58382SChristian Brauner * - CLONE_THREAD is blocked until someone really needs it.
2302b3e58382SChristian Brauner */
23037f192e3cSChristian Brauner if (clone_flags & (CLONE_DETACHED | CLONE_THREAD))
2304b3e58382SChristian Brauner return ERR_PTR(-EINVAL);
2305b3e58382SChristian Brauner }
2306b3e58382SChristian Brauner
2307c3ad2c3bSEric W. Biederman /*
2308c3ad2c3bSEric W. Biederman * Force any signals received before this point to be delivered
2309c3ad2c3bSEric W. Biederman * before the fork happens. Collect up signals sent to multiple
2310c3ad2c3bSEric W. Biederman * processes that happen during the fork and delay them so that
2311c3ad2c3bSEric W. Biederman * they appear to happen after the fork.
2312c3ad2c3bSEric W. Biederman */
2313c3ad2c3bSEric W. Biederman sigemptyset(&delayed.signal);
2314c3ad2c3bSEric W. Biederman INIT_HLIST_NODE(&delayed.node);
2315c3ad2c3bSEric W. Biederman
2316c3ad2c3bSEric W. Biederman spin_lock_irq(&current->sighand->siglock);
2317c3ad2c3bSEric W. Biederman if (!(clone_flags & CLONE_THREAD))
2318c3ad2c3bSEric W. Biederman hlist_add_head(&delayed.node, &current->signal->multiprocess);
2319c3ad2c3bSEric W. Biederman recalc_sigpending();
2320c3ad2c3bSEric W. Biederman spin_unlock_irq(&current->sighand->siglock);
2321c3ad2c3bSEric W. Biederman retval = -ERESTARTNOINTR;
232266ae0d1eSJens Axboe if (task_sigpending(current))
2323c3ad2c3bSEric W. Biederman goto fork_out;
2324c3ad2c3bSEric W. Biederman
23251da177e4SLinus Torvalds retval = -ENOMEM;
2326725fc629SAndi Kleen p = dup_task_struct(current, node);
23271da177e4SLinus Torvalds if (!p)
23281da177e4SLinus Torvalds goto fork_out;
2329753550ebSEric W. Biederman p->flags &= ~PF_KTHREAD;
2330753550ebSEric W. Biederman if (args->kthread)
2331753550ebSEric W. Biederman p->flags |= PF_KTHREAD;
2332f9010dbdSMike Christie if (args->user_worker) {
2333b16b3855SJens Axboe /*
2334f9010dbdSMike Christie * Mark us as a user worker, and block any signal that isn't
2335b16b3855SJens Axboe * fatal or STOP
2336b16b3855SJens Axboe */
2337f9010dbdSMike Christie p->flags |= PF_USER_WORKER;
2338b16b3855SJens Axboe siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
2339b16b3855SJens Axboe }
2340f9010dbdSMike Christie if (args->io_thread)
2341f9010dbdSMike Christie p->flags |= PF_IO_WORKER;
23421da177e4SLinus Torvalds
2343cf587db2SMike Christie if (args->name)
2344cf587db2SMike Christie strscpy_pad(p->comm, args->name, sizeof(p->comm));
2345cf587db2SMike Christie
23467f192e3cSChristian Brauner p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
23474d6501dcSVegard Nossum /*
23484d6501dcSVegard Nossum * Clear TID on mm_release()?
23494d6501dcSVegard Nossum */
23507f192e3cSChristian Brauner p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL;
23514d6501dcSVegard Nossum
2352f7e8b616SSteven Rostedt ftrace_graph_init_task(p);
2353f7e8b616SSteven Rostedt
2354bea493a0SPeter Zijlstra rt_mutex_init_task(p);
2355bea493a0SPeter Zijlstra
2356a21ee605SPeter Zijlstra lockdep_assert_irqs_enabled();
2357d12c1a37SIngo Molnar #ifdef CONFIG_PROVE_LOCKING
2358de30a2b3SIngo Molnar DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
2359de30a2b3SIngo Molnar #endif
23608f2f9c4dSEric W. Biederman retval = copy_creds(p, clone_flags);
23618f2f9c4dSEric W. Biederman if (retval < 0)
23628f2f9c4dSEric W. Biederman goto bad_fork_free;
23638f2f9c4dSEric W. Biederman
23641da177e4SLinus Torvalds retval = -EAGAIN;
2365de399236SAlexey Gladkov if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
2366b57922b6SEric Paris if (p->real_cred->user != INIT_USER &&
2367b57922b6SEric Paris !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
23688f2f9c4dSEric W. Biederman goto bad_fork_cleanup_count;
23691da177e4SLinus Torvalds }
237072fa5997SVasiliy Kulikov current->flags &= ~PF_NPROC_EXCEEDED;
23711da177e4SLinus Torvalds
23721da177e4SLinus Torvalds /*
23731da177e4SLinus Torvalds * If multiple threads are within copy_process(), then this check
23741da177e4SLinus Torvalds * triggers too late. This doesn't hurt; the check is only there
23751da177e4SLinus Torvalds * to stop root fork bombs.
23761da177e4SLinus Torvalds */
237704ec93feSLi Zefan retval = -EAGAIN;
2378c17d1a3aSWeilong Chen if (data_race(nr_threads >= max_threads))
23791da177e4SLinus Torvalds goto bad_fork_cleanup_count;
23801da177e4SLinus Torvalds
2381ca74e92bSShailabh Nagar delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
2382a8ea6fc9SFrederic Weisbecker p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
2383514ddb44SDavid Rientjes p->flags |= PF_FORKNOEXEC;
23841da177e4SLinus Torvalds INIT_LIST_HEAD(&p->children);
23851da177e4SLinus Torvalds INIT_LIST_HEAD(&p->sibling);
2386f41d911fSPaul E. McKenney rcu_copy_process(p);
23871da177e4SLinus Torvalds p->vfork_done = NULL;
23881da177e4SLinus Torvalds spin_lock_init(&p->alloc_lock);
23891da177e4SLinus Torvalds
23901da177e4SLinus Torvalds init_sigpending(&p->pending);
23911da177e4SLinus Torvalds
239264861634SMartin Schwidefsky p->utime = p->stime = p->gtime = 0;
239340565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
239464861634SMartin Schwidefsky p->utimescaled = p->stimescaled = 0;
239540565b5aSStanislaw Gruszka #endif
23969d7fb042SPeter Zijlstra prev_cputime_init(&p->prev_cputime);
23979d7fb042SPeter Zijlstra
23986a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2399bac5b6b6SFrederic Weisbecker seqcount_init(&p->vtime.seqcount);
2400bac5b6b6SFrederic Weisbecker p->vtime.starttime = 0;
2401bac5b6b6SFrederic Weisbecker p->vtime.state = VTIME_INACTIVE;
24026a61671bSFrederic Weisbecker #endif
24036a61671bSFrederic Weisbecker
24040f212204SJens Axboe #ifdef CONFIG_IO_URING
24050f212204SJens Axboe p->io_uring = NULL;
24060f212204SJens Axboe #endif
24070f212204SJens Axboe
2408a3a2e76cSKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING)
2409a3a2e76cSKAMEZAWA Hiroyuki memset(&p->rss_stat, 0, sizeof(p->rss_stat));
2410a3a2e76cSKAMEZAWA Hiroyuki #endif
2411172ba844SBalbir Singh
24126976675dSArjan van de Ven p->default_timer_slack_ns = current->timer_slack_ns;
24136976675dSArjan van de Ven
2414eb414681SJohannes Weiner #ifdef CONFIG_PSI
2415eb414681SJohannes Weiner p->psi_flags = 0;
2416eb414681SJohannes Weiner #endif
2417eb414681SJohannes Weiner
24185995477aSAndrea Righi task_io_accounting_init(&p->ioac);
24191da177e4SLinus Torvalds acct_clear_integrals(p);
24201da177e4SLinus Torvalds
24213a245c0fSThomas Gleixner posix_cputimers_init(&p->posix_cputimers);
2422*203003c4SBenjamin Segall tick_dep_init_task(p);
24231da177e4SLinus Torvalds
24241da177e4SLinus Torvalds p->io_context = NULL;
2425c0b0ae8aSRichard Guy Briggs audit_set_context(p, NULL);
2426b4f48b63SPaul Menage cgroup_fork(p);
2427343f4c49SEric W. Biederman if (args->kthread) {
242840966e31SEric W. Biederman if (!set_kthread_struct(p))
2429ff8288ffSEric W. Biederman goto bad_fork_cleanup_delayacct;
243040966e31SEric W. Biederman }
24311da177e4SLinus Torvalds #ifdef CONFIG_NUMA
2432846a16bfSLee Schermerhorn p->mempolicy = mpol_dup(p->mempolicy);
24331da177e4SLinus Torvalds if (IS_ERR(p->mempolicy)) {
24341da177e4SLinus Torvalds retval = PTR_ERR(p->mempolicy);
24351da177e4SLinus Torvalds p->mempolicy = NULL;
2436ff8288ffSEric W. Biederman goto bad_fork_cleanup_delayacct;
24371da177e4SLinus Torvalds }
24381da177e4SLinus Torvalds #endif
2439778d3b0fSMichal Hocko #ifdef CONFIG_CPUSETS
2440778d3b0fSMichal Hocko p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
2441778d3b0fSMichal Hocko p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
2442b7505861SAhmed S. Darwish seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
2443778d3b0fSMichal Hocko #endif
2444de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
24450584df9cSMarco Elver memset(&p->irqtrace, 0, sizeof(p->irqtrace));
24460584df9cSMarco Elver p->irqtrace.hardirq_disable_ip = _THIS_IP_;
24470584df9cSMarco Elver p->irqtrace.softirq_enable_ip = _THIS_IP_;
2448de30a2b3SIngo Molnar p->softirqs_enabled = 1;
2449de30a2b3SIngo Molnar p->softirq_context = 0;
2450de30a2b3SIngo Molnar #endif
24518bcbde54SDavid Hildenbrand
24528bcbde54SDavid Hildenbrand p->pagefault_disabled = 0;
24538bcbde54SDavid Hildenbrand
2454fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
2455b09be676SByungchul Park lockdep_init_task(p);
2456fbb9ce95SIngo Molnar #endif
24571da177e4SLinus Torvalds
2458408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
2459408894eeSIngo Molnar p->blocked_on = NULL; /* not blocked yet */
2460408894eeSIngo Molnar #endif
2461cafe5635SKent Overstreet #ifdef CONFIG_BCACHE
2462cafe5635SKent Overstreet p->sequential_io = 0;
2463cafe5635SKent Overstreet p->sequential_io_avg = 0;
2464cafe5635SKent Overstreet #endif
2465a10787e6SSong Liu #ifdef CONFIG_BPF_SYSCALL
2466a10787e6SSong Liu RCU_INIT_POINTER(p->bpf_storage, NULL);
2467c7603cfaSAndrii Nakryiko p->bpf_ctx = NULL;
2468a10787e6SSong Liu #endif
24690f481406SMarkus Metzger
24703c90e6e9SSrivatsa Vaddagiri /* Perform scheduler related setup. Assign this task to a CPU. */
2471aab03e05SDario Faggioli retval = sched_fork(clone_flags, p);
2472aab03e05SDario Faggioli if (retval)
2473aab03e05SDario Faggioli goto bad_fork_cleanup_policy;
24746ab423e0SPeter Zijlstra
24752b26f0aaSMarco Elver retval = perf_event_init_task(p, clone_flags);
24766ab423e0SPeter Zijlstra if (retval)
24776ab423e0SPeter Zijlstra goto bad_fork_cleanup_policy;
2478fb0a685cSDaniel Rebelo de Oliveira retval = audit_alloc(p);
2479fb0a685cSDaniel Rebelo de Oliveira if (retval)
24806c72e350SPeter Zijlstra goto bad_fork_cleanup_perf;
24811da177e4SLinus Torvalds /* copy all the process information */
2482ab602f79SJack Miller shm_init_task(p);
2483e4e55b47STetsuo Handa retval = security_task_alloc(p, clone_flags);
2484fb0a685cSDaniel Rebelo de Oliveira if (retval)
24851da177e4SLinus Torvalds goto bad_fork_cleanup_audit;
2486e4e55b47STetsuo Handa retval = copy_semundo(clone_flags, p);
2487e4e55b47STetsuo Handa if (retval)
2488e4e55b47STetsuo Handa goto bad_fork_cleanup_security;
248911f3f500SMike Christie retval = copy_files(clone_flags, p, args->no_files);
2490fb0a685cSDaniel Rebelo de Oliveira if (retval)
24911da177e4SLinus Torvalds goto bad_fork_cleanup_semundo;
2492fb0a685cSDaniel Rebelo de Oliveira retval = copy_fs(clone_flags, p);
2493fb0a685cSDaniel Rebelo de Oliveira if (retval)
24941da177e4SLinus Torvalds goto bad_fork_cleanup_files;
2495fb0a685cSDaniel Rebelo de Oliveira retval = copy_sighand(clone_flags, p);
2496fb0a685cSDaniel Rebelo de Oliveira if (retval)
24971da177e4SLinus Torvalds goto bad_fork_cleanup_fs;
2498fb0a685cSDaniel Rebelo de Oliveira retval = copy_signal(clone_flags, p);
2499fb0a685cSDaniel Rebelo de Oliveira if (retval)
25001da177e4SLinus Torvalds goto bad_fork_cleanup_sighand;
2501fb0a685cSDaniel Rebelo de Oliveira retval = copy_mm(clone_flags, p);
2502fb0a685cSDaniel Rebelo de Oliveira if (retval)
25031da177e4SLinus Torvalds goto bad_fork_cleanup_signal;
2504fb0a685cSDaniel Rebelo de Oliveira retval = copy_namespaces(clone_flags, p);
2505fb0a685cSDaniel Rebelo de Oliveira if (retval)
2506d84f4f99SDavid Howells goto bad_fork_cleanup_mm;
2507fb0a685cSDaniel Rebelo de Oliveira retval = copy_io(clone_flags, p);
2508fb0a685cSDaniel Rebelo de Oliveira if (retval)
2509fd0928dfSJens Axboe goto bad_fork_cleanup_namespaces;
2510c5febea0SEric W. Biederman retval = copy_thread(p, args);
25111da177e4SLinus Torvalds if (retval)
2512fd0928dfSJens Axboe goto bad_fork_cleanup_io;
25131da177e4SLinus Torvalds
2514afaef01cSAlexander Popov stackleak_task_init(p);
2515afaef01cSAlexander Popov
2516425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) {
251749cb2fc4SAdrian Reber pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
251849cb2fc4SAdrian Reber args->set_tid_size);
251935f71bc0SMichal Hocko if (IS_ERR(pid)) {
252035f71bc0SMichal Hocko retval = PTR_ERR(pid);
25210740aa5fSJiri Slaby goto bad_fork_cleanup_thread;
2522425fb2b4SPavel Emelyanov }
252335f71bc0SMichal Hocko }
2524425fb2b4SPavel Emelyanov
2525b3e58382SChristian Brauner /*
2526b3e58382SChristian Brauner * This has to happen after we've potentially unshared the file
2527b3e58382SChristian Brauner * descriptor table (so that the pidfd doesn't leak into the child
2528b3e58382SChristian Brauner * if the fd table isn't shared).
2529b3e58382SChristian Brauner */
2530b3e58382SChristian Brauner if (clone_flags & CLONE_PIDFD) {
2531ca7707f5SChristian Brauner /* Note that no task has been attached to @pid yet. */
2532ca7707f5SChristian Brauner retval = __pidfd_prepare(pid, O_RDWR | O_CLOEXEC, &pidfile);
2533b3e58382SChristian Brauner if (retval < 0)
2534b3e58382SChristian Brauner goto bad_fork_free_pid;
2535b3e58382SChristian Brauner pidfd = retval;
25366fd2fe49SAl Viro
25377f192e3cSChristian Brauner retval = put_user(pidfd, args->pidfd);
2538b3e58382SChristian Brauner if (retval)
2539b3e58382SChristian Brauner goto bad_fork_put_pidfd;
2540b3e58382SChristian Brauner }
2541b3e58382SChristian Brauner
254273c10101SJens Axboe #ifdef CONFIG_BLOCK
254373c10101SJens Axboe p->plug = NULL;
254473c10101SJens Axboe #endif
2545ba31c1a4SThomas Gleixner futex_init_task(p);
2546ba31c1a4SThomas Gleixner
25471da177e4SLinus Torvalds /*
2548f9a3879aSGOTO Masanori * sigaltstack should be cleared when sharing the same VM
2549f9a3879aSGOTO Masanori */
2550f9a3879aSGOTO Masanori if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
25512a742138SStas Sergeev sas_ss_reset(p);
2552f9a3879aSGOTO Masanori
2553f9a3879aSGOTO Masanori /*
25546580807dSOleg Nesterov * Syscall tracing and stepping should be turned off in the
25556580807dSOleg Nesterov * child regardless of CLONE_PTRACE.
25561da177e4SLinus Torvalds */
25576580807dSOleg Nesterov user_disable_single_step(p);
255864c19ba2SGabriel Krisman Bertazi clear_task_syscall_work(p, SYSCALL_TRACE);
255964eb35f7SGabriel Krisman Bertazi #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
256064eb35f7SGabriel Krisman Bertazi clear_task_syscall_work(p, SYSCALL_EMU);
2561ed75e8d5SLaurent Vivier #endif
2562e02c9b0dSLin Feng clear_tsk_latency_tracing(p);
25631da177e4SLinus Torvalds
25641da177e4SLinus Torvalds /* ok, now we should be set up.. */
256518c830dfSOleg Nesterov p->pid = pid_nr(pid);
256618c830dfSOleg Nesterov if (clone_flags & CLONE_THREAD) {
256718c830dfSOleg Nesterov p->group_leader = current->group_leader;
256818c830dfSOleg Nesterov p->tgid = current->tgid;
256918c830dfSOleg Nesterov } else {
257018c830dfSOleg Nesterov p->group_leader = p;
257118c830dfSOleg Nesterov p->tgid = p->pid;
257218c830dfSOleg Nesterov }
25735f8aadd8SOleg Nesterov
25749d823e8fSWu Fengguang p->nr_dirtied = 0;
25759d823e8fSWu Fengguang p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
257683712358SWu Fengguang p->dirty_paused_when = 0;
25779d823e8fSWu Fengguang
2578bb8cbbfeSOleg Nesterov p->pdeath_signal = 0;
257947e65328SOleg Nesterov INIT_LIST_HEAD(&p->thread_group);
2580158e1645SAl Viro p->task_works = NULL;
2581ca7752caSMichael Pratt clear_posix_cputimers_work(p);
25821da177e4SLinus Torvalds
2583d741bf41SPeter Zijlstra #ifdef CONFIG_KRETPROBES
2584d741bf41SPeter Zijlstra p->kretprobe_instances.first = NULL;
2585d741bf41SPeter Zijlstra #endif
258654ecbe6fSMasami Hiramatsu #ifdef CONFIG_RETHOOK
258754ecbe6fSMasami Hiramatsu p->rethooks.first = NULL;
258854ecbe6fSMasami Hiramatsu #endif
2589d741bf41SPeter Zijlstra
259018c830dfSOleg Nesterov /*
25917e47682eSAleksa Sarai * Ensure that the cgroup subsystem policies allow the new process to be
25927b7b8a2cSRandy Dunlap * forked. It should be noted that the new process's css_set can be changed
25937e47682eSAleksa Sarai * between here and cgroup_post_fork() if an organisation operation is in
25947e47682eSAleksa Sarai * progress.
25957e47682eSAleksa Sarai */
2596ef2c41cfSChristian Brauner retval = cgroup_can_fork(p, args);
25977e47682eSAleksa Sarai if (retval)
25985a5cf5cbSChristian Brauner goto bad_fork_put_pidfd;
25997e47682eSAleksa Sarai
26007e47682eSAleksa Sarai /*
2601b1e82065SPeter Zijlstra * Now that the cgroups are pinned, re-clone the parent cgroup and put
2602b1e82065SPeter Zijlstra * the new task on the correct runqueue. All this *before* the task
2603b1e82065SPeter Zijlstra * becomes visible.
2604b1e82065SPeter Zijlstra *
2605b1e82065SPeter Zijlstra * This isn't part of ->can_fork() because while the re-cloning is
2606b1e82065SPeter Zijlstra * cgroup specific, it unconditionally needs to place the task on a
2607b1e82065SPeter Zijlstra * runqueue.
2608b1e82065SPeter Zijlstra */
2609b1e82065SPeter Zijlstra sched_cgroup_fork(p, args);
2610b1e82065SPeter Zijlstra
2611b1e82065SPeter Zijlstra /*
26127b558513SDavid Herrmann * From this point on we must avoid any synchronous user-space
26137b558513SDavid Herrmann * communication until we take the tasklist-lock. In particular, we do
26147b558513SDavid Herrmann * not want user-space to be able to predict the process start-time by
26157b558513SDavid Herrmann * stalling fork(2) after we recorded the start_time but before it is
26167b558513SDavid Herrmann * visible to the system.
26177b558513SDavid Herrmann */
26187b558513SDavid Herrmann
26197b558513SDavid Herrmann p->start_time = ktime_get_ns();
2620cf25e24dSPeter Zijlstra p->start_boottime = ktime_get_boottime_ns();
26217b558513SDavid Herrmann
26227b558513SDavid Herrmann /*
262318c830dfSOleg Nesterov * Make it visible to the rest of the system, but don't wake it up yet.
262418c830dfSOleg Nesterov * Need tasklist lock for parent etc handling!
262518c830dfSOleg Nesterov */
26261da177e4SLinus Torvalds write_lock_irq(&tasklist_lock);
26271da177e4SLinus Torvalds
26281da177e4SLinus Torvalds /* CLONE_PARENT re-uses the old parent */
26292d5516cbSOleg Nesterov if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
26301da177e4SLinus Torvalds p->real_parent = current->real_parent;
26312d5516cbSOleg Nesterov p->parent_exec_id = current->parent_exec_id;
2632b4e00444SEddy Wu if (clone_flags & CLONE_THREAD)
2633b4e00444SEddy Wu p->exit_signal = -1;
2634b4e00444SEddy Wu else
2635b4e00444SEddy Wu p->exit_signal = current->group_leader->exit_signal;
26362d5516cbSOleg Nesterov } else {
26371da177e4SLinus Torvalds p->real_parent = current;
26382d5516cbSOleg Nesterov p->parent_exec_id = current->self_exec_id;
2639b4e00444SEddy Wu p->exit_signal = args->exit_signal;
26402d5516cbSOleg Nesterov }
26411da177e4SLinus Torvalds
2642d83a7cb3SJosh Poimboeuf klp_copy_process(p);
2643d83a7cb3SJosh Poimboeuf
264485dd3f61SPeter Zijlstra sched_core_fork(p);
264585dd3f61SPeter Zijlstra
26461da177e4SLinus Torvalds spin_lock(&current->sighand->siglock);
26474a2c7a78SOleg Nesterov
264879257534SDaniel Bristot de Oliveira rv_task_fork(p);
264979257534SDaniel Bristot de Oliveira
2650d7822b1eSMathieu Desnoyers rseq_fork(p, clone_flags);
2651d7822b1eSMathieu Desnoyers
26524ca1d3eeSEric W. Biederman /* Don't start children in a dying pid namespace */
2653e8cfbc24SGargi Sharma if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
26543fd37226SKirill Tkhai retval = -ENOMEM;
26553fd37226SKirill Tkhai goto bad_fork_cancel_cgroup;
26563fd37226SKirill Tkhai }
26574a2c7a78SOleg Nesterov
26587673bf55SEric W. Biederman /* Let kill terminate clone/fork in the middle */
26597673bf55SEric W. Biederman if (fatal_signal_pending(current)) {
26607673bf55SEric W. Biederman retval = -EINTR;
26617673bf55SEric W. Biederman goto bad_fork_cancel_cgroup;
26627673bf55SEric W. Biederman }
26637673bf55SEric W. Biederman
2664a1140cb2SKuniyuki Iwashima /* No more failure paths after this point. */
2665a1140cb2SKuniyuki Iwashima
2666a1140cb2SKuniyuki Iwashima /*
2667a1140cb2SKuniyuki Iwashima * Copy seccomp details explicitly here, in case they were changed
2668a1140cb2SKuniyuki Iwashima * before holding sighand lock.
2669a1140cb2SKuniyuki Iwashima */
2670a1140cb2SKuniyuki Iwashima copy_seccomp(p);
2671a1140cb2SKuniyuki Iwashima
26722c470475SEric W. Biederman init_task_pid_links(p);
267373b9ebfeSOleg Nesterov if (likely(p->pid)) {
26744b9d33e6STejun Heo ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
26751da177e4SLinus Torvalds
267681907739SOleg Nesterov init_task_pid(p, PIDTYPE_PID, pid);
26771da177e4SLinus Torvalds if (thread_group_leader(p)) {
26786883f81aSEric W. Biederman init_task_pid(p, PIDTYPE_TGID, pid);
267981907739SOleg Nesterov init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
268081907739SOleg Nesterov init_task_pid(p, PIDTYPE_SID, task_session(current));
268181907739SOleg Nesterov
26821c4042c2SEric W. Biederman if (is_child_reaper(pid)) {
268317cf22c3SEric W. Biederman ns_of_pid(pid)->child_reaper = p;
26841c4042c2SEric W. Biederman p->signal->flags |= SIGNAL_UNKILLABLE;
26851c4042c2SEric W. Biederman }
2686c3ad2c3bSEric W. Biederman p->signal->shared_pending.signal = delayed.signal;
26879c9f4dedSAlan Cox p->signal->tty = tty_kref_get(current->signal->tty);
2688749860ceSPavel Tikhomirov /*
2689749860ceSPavel Tikhomirov * Inherit the has_child_subreaper flag while holding the same
2690749860ceSPavel Tikhomirov * tasklist_lock used to add the child to the process tree,
2691749860ceSPavel Tikhomirov * for the propagate_has_child_subreaper optimization.
2692749860ceSPavel Tikhomirov */
2693749860ceSPavel Tikhomirov p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
2694749860ceSPavel Tikhomirov p->real_parent->signal->is_child_subreaper;
26959cd80bbbSOleg Nesterov list_add_tail(&p->sibling, &p->real_parent->children);
26965e85d4abSEric W. Biederman list_add_tail_rcu(&p->tasks, &init_task.tasks);
26976883f81aSEric W. Biederman attach_pid(p, PIDTYPE_TGID);
269881907739SOleg Nesterov attach_pid(p, PIDTYPE_PGID);
269981907739SOleg Nesterov attach_pid(p, PIDTYPE_SID);
2700909ea964SChristoph Lameter __this_cpu_inc(process_counts);
270180628ca0SOleg Nesterov } else {
270280628ca0SOleg Nesterov current->signal->nr_threads++;
2703d80f7d7bSEric W. Biederman current->signal->quick_threads++;
270480628ca0SOleg Nesterov atomic_inc(&current->signal->live);
270560d4de3fSElena Reshetova refcount_inc(&current->signal->sigcnt);
2706924de3b8SEric W. Biederman task_join_group_stop(p);
270780628ca0SOleg Nesterov list_add_tail_rcu(&p->thread_group,
270880628ca0SOleg Nesterov &p->group_leader->thread_group);
27090c740d0aSOleg Nesterov list_add_tail_rcu(&p->thread_node,
27100c740d0aSOleg Nesterov &p->signal->thread_head);
27111da177e4SLinus Torvalds }
271281907739SOleg Nesterov attach_pid(p, PIDTYPE_PID);
27131da177e4SLinus Torvalds nr_threads++;
271473b9ebfeSOleg Nesterov }
27151da177e4SLinus Torvalds total_forks++;
2716c3ad2c3bSEric W. Biederman hlist_del_init(&delayed.node);
27173f17da69SOleg Nesterov spin_unlock(&current->sighand->siglock);
27184af4206bSOleg Nesterov syscall_tracepoint_update(p);
27191da177e4SLinus Torvalds write_unlock_irq(&tasklist_lock);
27204af4206bSOleg Nesterov
2721ddc204b5SWaiman Long if (pidfile)
2722ddc204b5SWaiman Long fd_install(pidfd, pidfile);
2723ddc204b5SWaiman Long
2724c13cf856SAndrew Morton proc_fork_connector(p);
2725b1e82065SPeter Zijlstra sched_post_fork(p);
2726ef2c41cfSChristian Brauner cgroup_post_fork(p, args);
2727cdd6c482SIngo Molnar perf_event_fork(p);
272843d2b113SKAMEZAWA Hiroyuki
272943d2b113SKAMEZAWA Hiroyuki trace_task_newtask(p, clone_flags);
27303ab67966SOleg Nesterov uprobe_copy_process(p, clone_flags);
2731fd593511SBeau Belgrave user_events_fork(p, clone_flags);
273243d2b113SKAMEZAWA Hiroyuki
273367197a4fSSuren Baghdasaryan copy_oom_score_adj(clone_flags, p);
273467197a4fSSuren Baghdasaryan
27351da177e4SLinus Torvalds return p;
27361da177e4SLinus Torvalds
27377e47682eSAleksa Sarai bad_fork_cancel_cgroup:
273885dd3f61SPeter Zijlstra sched_core_free(p);
27393fd37226SKirill Tkhai spin_unlock(&current->sighand->siglock);
27403fd37226SKirill Tkhai write_unlock_irq(&tasklist_lock);
2741ef2c41cfSChristian Brauner cgroup_cancel_fork(p, args);
2742b3e58382SChristian Brauner bad_fork_put_pidfd:
27436fd2fe49SAl Viro if (clone_flags & CLONE_PIDFD) {
27446fd2fe49SAl Viro fput(pidfile);
27456fd2fe49SAl Viro put_unused_fd(pidfd);
27466fd2fe49SAl Viro }
2747425fb2b4SPavel Emelyanov bad_fork_free_pid:
2748425fb2b4SPavel Emelyanov if (pid != &init_struct_pid)
2749425fb2b4SPavel Emelyanov free_pid(pid);
27500740aa5fSJiri Slaby bad_fork_cleanup_thread:
27510740aa5fSJiri Slaby exit_thread(p);
2752fd0928dfSJens Axboe bad_fork_cleanup_io:
2753b69f2292SLouis Rilling if (p->io_context)
2754b69f2292SLouis Rilling exit_io_context(p);
2755ab516013SSerge E. Hallyn bad_fork_cleanup_namespaces:
2756444f378bSLinus Torvalds exit_task_namespaces(p);
27571da177e4SLinus Torvalds bad_fork_cleanup_mm:
2758c3f3ce04SAndrea Arcangeli if (p->mm) {
2759c3f3ce04SAndrea Arcangeli mm_clear_owner(p->mm, p);
27601da177e4SLinus Torvalds mmput(p->mm);
2761c3f3ce04SAndrea Arcangeli }
27621da177e4SLinus Torvalds bad_fork_cleanup_signal:
27634ab6c083SOleg Nesterov if (!(clone_flags & CLONE_THREAD))
27641c5354deSMike Galbraith free_signal_struct(p->signal);
27651da177e4SLinus Torvalds bad_fork_cleanup_sighand:
2766a7e5328aSOleg Nesterov __cleanup_sighand(p->sighand);
27671da177e4SLinus Torvalds bad_fork_cleanup_fs:
27681da177e4SLinus Torvalds exit_fs(p); /* blocking */
27691da177e4SLinus Torvalds bad_fork_cleanup_files:
27701da177e4SLinus Torvalds exit_files(p); /* blocking */
27711da177e4SLinus Torvalds bad_fork_cleanup_semundo:
27721da177e4SLinus Torvalds exit_sem(p);
2773e4e55b47STetsuo Handa bad_fork_cleanup_security:
2774e4e55b47STetsuo Handa security_task_free(p);
27751da177e4SLinus Torvalds bad_fork_cleanup_audit:
27761da177e4SLinus Torvalds audit_free(p);
27776c72e350SPeter Zijlstra bad_fork_cleanup_perf:
2778cdd6c482SIngo Molnar perf_event_free_task(p);
27796c72e350SPeter Zijlstra bad_fork_cleanup_policy:
2780b09be676SByungchul Park lockdep_free_task(p);
27811da177e4SLinus Torvalds #ifdef CONFIG_NUMA
2782f0be3d32SLee Schermerhorn mpol_put(p->mempolicy);
27831da177e4SLinus Torvalds #endif
2784ff8288ffSEric W. Biederman bad_fork_cleanup_delayacct:
278535df17c5SShailabh Nagar delayacct_tsk_free(p);
27861da177e4SLinus Torvalds bad_fork_cleanup_count:
278721d1c5e3SAlexey Gladkov dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
2788e0e81739SDavid Howells exit_creds(p);
27891da177e4SLinus Torvalds bad_fork_free:
27902f064a59SPeter Zijlstra WRITE_ONCE(p->__state, TASK_DEAD);
27911a03d3f1SSebastian Andrzej Siewior exit_task_stack_account(p);
279268f24b08SAndy Lutomirski put_task_stack(p);
2793c3f3ce04SAndrea Arcangeli delayed_free_task(p);
2794fe7d37d1SOleg Nesterov fork_out:
2795c3ad2c3bSEric W. Biederman spin_lock_irq(&current->sighand->siglock);
2796c3ad2c3bSEric W. Biederman hlist_del_init(&delayed.node);
2797c3ad2c3bSEric W. Biederman spin_unlock_irq(&current->sighand->siglock);
2798fe7d37d1SOleg Nesterov return ERR_PTR(retval);
27991da177e4SLinus Torvalds }
28001da177e4SLinus Torvalds
28012c470475SEric W. Biederman static inline void init_idle_pids(struct task_struct *idle)
2802f106eee1SOleg Nesterov {
2803f106eee1SOleg Nesterov enum pid_type type;
2804f106eee1SOleg Nesterov
2805f106eee1SOleg Nesterov for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
28062c470475SEric W. Biederman INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
28072c470475SEric W. Biederman init_task_pid(idle, type, &init_struct_pid);
2808f106eee1SOleg Nesterov }
2809f106eee1SOleg Nesterov }
2810f106eee1SOleg Nesterov
281136cb0e1cSEric W. Biederman static int idle_dummy(void *dummy)
281236cb0e1cSEric W. Biederman {
281336cb0e1cSEric W. Biederman /* This function is never called */
281436cb0e1cSEric W. Biederman return 0;
281536cb0e1cSEric W. Biederman }
281636cb0e1cSEric W. Biederman
2817f1a0a376SValentin Schneider struct task_struct * __init fork_idle(int cpu)
28181da177e4SLinus Torvalds {
281936c8b586SIngo Molnar struct task_struct *task;
28207f192e3cSChristian Brauner struct kernel_clone_args args = {
28217f192e3cSChristian Brauner .flags = CLONE_VM,
28225bd2e97cSEric W. Biederman .fn = &idle_dummy,
28235bd2e97cSEric W. Biederman .fn_arg = NULL,
2824343f4c49SEric W. Biederman .kthread = 1,
282536cb0e1cSEric W. Biederman .idle = 1,
28267f192e3cSChristian Brauner };
28277f192e3cSChristian Brauner
28287f192e3cSChristian Brauner task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
2829f106eee1SOleg Nesterov if (!IS_ERR(task)) {
28302c470475SEric W. Biederman init_idle_pids(task);
28311da177e4SLinus Torvalds init_idle(task, cpu);
2832f106eee1SOleg Nesterov }
283373b9ebfeSOleg Nesterov
28341da177e4SLinus Torvalds return task;
28351da177e4SLinus Torvalds }
28361da177e4SLinus Torvalds
28371da177e4SLinus Torvalds /*
2838cc440e87SJens Axboe * This is like kernel_clone(), but shaved down and tailored to just
2839cc440e87SJens Axboe * creating io_uring workers. It returns a created task, or an error pointer.
2840cc440e87SJens Axboe * The returned task is inactive, and the caller must fire it up through
2841cc440e87SJens Axboe * wake_up_new_task(p). All signals are blocked in the created task.
2842cc440e87SJens Axboe */
2843cc440e87SJens Axboe struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
2844cc440e87SJens Axboe {
2845cc440e87SJens Axboe unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
2846cc440e87SJens Axboe CLONE_IO;
2847cc440e87SJens Axboe struct kernel_clone_args args = {
2848cc440e87SJens Axboe .flags = ((lower_32_bits(flags) | CLONE_VM |
2849cc440e87SJens Axboe CLONE_UNTRACED) & ~CSIGNAL),
2850cc440e87SJens Axboe .exit_signal = (lower_32_bits(flags) & CSIGNAL),
28515bd2e97cSEric W. Biederman .fn = fn,
28525bd2e97cSEric W. Biederman .fn_arg = arg,
2853cc440e87SJens Axboe .io_thread = 1,
285454e6842dSMike Christie .user_worker = 1,
2855cc440e87SJens Axboe };
2856cc440e87SJens Axboe
2857b16b3855SJens Axboe return copy_process(NULL, 0, node, &args);
2858cc440e87SJens Axboe }
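/*
 * Editorial example (not part of fork.c): minimal sketch of how an io_uring
 * style caller might use create_io_thread(); "my_worker_fn" and "my_worker"
 * are hypothetical names. As noted above, the returned task is inactive until
 * wake_up_new_task() is called.
 *
 *      struct task_struct *tsk;
 *
 *      tsk = create_io_thread(my_worker_fn, my_worker, NUMA_NO_NODE);
 *      if (IS_ERR(tsk))
 *              return PTR_ERR(tsk);
 *      wake_up_new_task(tsk);                  // worker starts running my_worker_fn
 */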
2859cc440e87SJens Axboe
2860cc440e87SJens Axboe /*
28611da177e4SLinus Torvalds * Ok, this is the main fork-routine.
28621da177e4SLinus Torvalds *
28631da177e4SLinus Torvalds * It copies the process, and if successful kick-starts
28641da177e4SLinus Torvalds * it and waits for it to finish using the VM if required.
2865a0eb9abdSEugene Syromiatnikov *
2866a0eb9abdSEugene Syromiatnikov * args->exit_signal is expected to be checked for sanity by the caller.
28671da177e4SLinus Torvalds */
2868cad6967aSChristian Brauner pid_t kernel_clone(struct kernel_clone_args *args)
28691da177e4SLinus Torvalds {
28707f192e3cSChristian Brauner u64 clone_flags = args->flags;
28719f5325aaSMarcos Paulo de Souza struct completion vfork;
28729f5325aaSMarcos Paulo de Souza struct pid *pid;
28731da177e4SLinus Torvalds struct task_struct *p;
28741da177e4SLinus Torvalds int trace = 0;
2875cad6967aSChristian Brauner pid_t nr;
28761da177e4SLinus Torvalds
2877bdff746aSAndrew Morton /*
28783af8588cSChristian Brauner * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument
28793af8588cSChristian Brauner * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are
28803af8588cSChristian Brauner * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate
28813af8588cSChristian Brauner * field in struct clone_args and it still doesn't make sense to have
28823af8588cSChristian Brauner * them both point at the same memory location. Performing this check
28833af8588cSChristian Brauner * here has the advantage that we don't need to have a separate helper
28843af8588cSChristian Brauner * to check for legacy clone().
28853af8588cSChristian Brauner */
28863af8588cSChristian Brauner if ((args->flags & CLONE_PIDFD) &&
28873af8588cSChristian Brauner (args->flags & CLONE_PARENT_SETTID) &&
28883af8588cSChristian Brauner (args->pidfd == args->parent_tid))
28893af8588cSChristian Brauner return -EINVAL;
28903af8588cSChristian Brauner
28913af8588cSChristian Brauner /*
28924b9d33e6STejun Heo * Determine whether and which event to report to ptracer. When
28934b9d33e6STejun Heo * called from kernel_thread() or when CLONE_UNTRACED is explicitly
28944b9d33e6STejun Heo * requested, no event is reported; otherwise, report if the event
28954b9d33e6STejun Heo * for the type of forking is enabled.
289609a05394SRoland McGrath */
2897e80d6661SAl Viro if (!(clone_flags & CLONE_UNTRACED)) {
28984b9d33e6STejun Heo if (clone_flags & CLONE_VFORK)
28994b9d33e6STejun Heo trace = PTRACE_EVENT_VFORK;
29007f192e3cSChristian Brauner else if (args->exit_signal != SIGCHLD)
29014b9d33e6STejun Heo trace = PTRACE_EVENT_CLONE;
29024b9d33e6STejun Heo else
29034b9d33e6STejun Heo trace = PTRACE_EVENT_FORK;
29044b9d33e6STejun Heo
29054b9d33e6STejun Heo if (likely(!ptrace_event_enabled(current, trace)))
29064b9d33e6STejun Heo trace = 0;
29074b9d33e6STejun Heo }
29081da177e4SLinus Torvalds
29097f192e3cSChristian Brauner p = copy_process(NULL, trace, NUMA_NO_NODE, args);
291038addce8SEmese Revfy add_latent_entropy();
29119f5325aaSMarcos Paulo de Souza
29129f5325aaSMarcos Paulo de Souza if (IS_ERR(p))
29139f5325aaSMarcos Paulo de Souza return PTR_ERR(p);
29149f5325aaSMarcos Paulo de Souza
29151da177e4SLinus Torvalds /*
29161da177e4SLinus Torvalds * Do this prior to waking up the new thread - the thread pointer
29171da177e4SLinus Torvalds * might get invalid after that point, if the thread exits quickly.
29181da177e4SLinus Torvalds */
29190a16b607SMathieu Desnoyers trace_sched_process_fork(current, p);
29200a16b607SMathieu Desnoyers
29214e52365fSMatthew Dempsky pid = get_task_pid(p, PIDTYPE_PID);
29224e52365fSMatthew Dempsky nr = pid_vnr(pid);
292330e49c26SPavel Emelyanov
292430e49c26SPavel Emelyanov if (clone_flags & CLONE_PARENT_SETTID)
29257f192e3cSChristian Brauner put_user(nr, args->parent_tid);
2926a6f5e063SSukadev Bhattiprolu
29271da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) {
29281da177e4SLinus Torvalds p->vfork_done = &vfork;
29291da177e4SLinus Torvalds init_completion(&vfork);
2930d68b46feSOleg Nesterov get_task_struct(p);
29311da177e4SLinus Torvalds }
29321da177e4SLinus Torvalds
2933bd74fdaeSYu Zhao if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
2934bd74fdaeSYu Zhao /* lock the task to synchronize with memcg migration */
2935bd74fdaeSYu Zhao task_lock(p);
2936bd74fdaeSYu Zhao lru_gen_add_mm(p->mm);
2937bd74fdaeSYu Zhao task_unlock(p);
2938bd74fdaeSYu Zhao }
2939bd74fdaeSYu Zhao
29403e51e3edSSamir Bellabes wake_up_new_task(p);
29411da177e4SLinus Torvalds
29424b9d33e6STejun Heo /* forking complete and child started to run, tell ptracer */
29434b9d33e6STejun Heo if (unlikely(trace))
29444e52365fSMatthew Dempsky ptrace_event_pid(trace, pid);
294509a05394SRoland McGrath
29461da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) {
2947d68b46feSOleg Nesterov if (!wait_for_vfork_done(p, &vfork))
29484e52365fSMatthew Dempsky ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
29499f59ce5dSChuck Ebbert }
29504e52365fSMatthew Dempsky
29514e52365fSMatthew Dempsky put_pid(pid);
295292476d7fSEric W. Biederman return nr;
29531da177e4SLinus Torvalds }
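/*
 * Editorial example (not part of fork.c): the userspace view of the legacy
 * clone() convention checked at the top of kernel_clone(), where CLONE_PIDFD
 * reuses the parent_tid slot to return the pidfd. This uses the glibc clone()
 * wrapper; "child_fn" and "stack_top" are assumed to be provided by the
 * caller.
 *
 *      #define _GNU_SOURCE
 *      #include <sched.h>
 *      #include <signal.h>
 *
 *      int pidfd = -1;
 *      pid_t child = clone(child_fn, stack_top,
 *                          CLONE_PIDFD | SIGCHLD, NULL,
 *                          &pidfd);    // pidfd stored via the parent_tid argument
 */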
29541da177e4SLinus Torvalds
29552aa3a7f8SAl Viro /*
29562aa3a7f8SAl Viro * Create a kernel thread.
29572aa3a7f8SAl Viro */
2958cf587db2SMike Christie pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
2959cf587db2SMike Christie unsigned long flags)
29602aa3a7f8SAl Viro {
29617f192e3cSChristian Brauner struct kernel_clone_args args = {
29623f2c788aSChristian Brauner .flags = ((lower_32_bits(flags) | CLONE_VM |
29633f2c788aSChristian Brauner CLONE_UNTRACED) & ~CSIGNAL),
29643f2c788aSChristian Brauner .exit_signal = (lower_32_bits(flags) & CSIGNAL),
29655bd2e97cSEric W. Biederman .fn = fn,
29665bd2e97cSEric W. Biederman .fn_arg = arg,
2967cf587db2SMike Christie .name = name,
2968343f4c49SEric W. Biederman .kthread = 1,
2969343f4c49SEric W. Biederman };
2970343f4c49SEric W. Biederman
2971343f4c49SEric W. Biederman return kernel_clone(&args);
2972343f4c49SEric W. Biederman }
2973343f4c49SEric W. Biederman
2974343f4c49SEric W. Biederman /*
2975343f4c49SEric W. Biederman * Create a user mode thread.
2976343f4c49SEric W. Biederman */
2977343f4c49SEric W. Biederman pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
2978343f4c49SEric W. Biederman {
2979343f4c49SEric W. Biederman struct kernel_clone_args args = {
2980343f4c49SEric W. Biederman .flags = ((lower_32_bits(flags) | CLONE_VM |
2981343f4c49SEric W. Biederman CLONE_UNTRACED) & ~CSIGNAL),
2982343f4c49SEric W. Biederman .exit_signal = (lower_32_bits(flags) & CSIGNAL),
29835bd2e97cSEric W. Biederman .fn = fn,
29845bd2e97cSEric W. Biederman .fn_arg = arg,
29857f192e3cSChristian Brauner };
29867f192e3cSChristian Brauner
2987cad6967aSChristian Brauner return kernel_clone(&args);
29882aa3a7f8SAl Viro }
29892aa3a7f8SAl Viro
2990d2125043SAl Viro #ifdef __ARCH_WANT_SYS_FORK
2991d2125043SAl Viro SYSCALL_DEFINE0(fork)
2992d2125043SAl Viro {
2993d2125043SAl Viro #ifdef CONFIG_MMU
29947f192e3cSChristian Brauner struct kernel_clone_args args = {
29957f192e3cSChristian Brauner .exit_signal = SIGCHLD,
29967f192e3cSChristian Brauner };
29977f192e3cSChristian Brauner
2998cad6967aSChristian Brauner return kernel_clone(&args);
2999d2125043SAl Viro #else
3000d2125043SAl Viro /* cannot be supported in nommu mode */
30015d59e182SDaeseok Youn return -EINVAL;
3002d2125043SAl Viro #endif
3003d2125043SAl Viro }
3004d2125043SAl Viro #endif
3005d2125043SAl Viro
3006d2125043SAl Viro #ifdef __ARCH_WANT_SYS_VFORK
3007d2125043SAl Viro SYSCALL_DEFINE0(vfork)
3008d2125043SAl Viro {
30097f192e3cSChristian Brauner struct kernel_clone_args args = {
30107f192e3cSChristian Brauner .flags = CLONE_VFORK | CLONE_VM,
30117f192e3cSChristian Brauner .exit_signal = SIGCHLD,
30127f192e3cSChristian Brauner };
30137f192e3cSChristian Brauner
3014cad6967aSChristian Brauner return kernel_clone(&args);
3015d2125043SAl Viro }
3016d2125043SAl Viro #endif
3017d2125043SAl Viro
3018d2125043SAl Viro #ifdef __ARCH_WANT_SYS_CLONE
3019d2125043SAl Viro #ifdef CONFIG_CLONE_BACKWARDS
3020d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
3021d2125043SAl Viro int __user *, parent_tidptr,
30223033f14aSJosh Triplett unsigned long, tls,
3023d2125043SAl Viro int __user *, child_tidptr)
3024d2125043SAl Viro #elif defined(CONFIG_CLONE_BACKWARDS2)
3025d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
3026d2125043SAl Viro int __user *, parent_tidptr,
3027d2125043SAl Viro int __user *, child_tidptr,
30283033f14aSJosh Triplett unsigned long, tls)
3029dfa9771aSMichal Simek #elif defined(CONFIG_CLONE_BACKWARDS3)
3030dfa9771aSMichal Simek SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
3031dfa9771aSMichal Simek int, stack_size,
3032dfa9771aSMichal Simek int __user *, parent_tidptr,
3033dfa9771aSMichal Simek int __user *, child_tidptr,
30343033f14aSJosh Triplett unsigned long, tls)
3035d2125043SAl Viro #else
3036d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
3037d2125043SAl Viro int __user *, parent_tidptr,
3038d2125043SAl Viro int __user *, child_tidptr,
30393033f14aSJosh Triplett unsigned long, tls)
3040d2125043SAl Viro #endif
3041d2125043SAl Viro {
30427f192e3cSChristian Brauner struct kernel_clone_args args = {
30433f2c788aSChristian Brauner .flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
30447f192e3cSChristian Brauner .pidfd = parent_tidptr,
30457f192e3cSChristian Brauner .child_tid = child_tidptr,
30467f192e3cSChristian Brauner .parent_tid = parent_tidptr,
30473f2c788aSChristian Brauner .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
30487f192e3cSChristian Brauner .stack = newsp,
30497f192e3cSChristian Brauner .tls = tls,
30507f192e3cSChristian Brauner };
30517f192e3cSChristian Brauner
3052cad6967aSChristian Brauner return kernel_clone(&args);
30537f192e3cSChristian Brauner }
3054d68dbb0cSChristian Brauner #endif
30557f192e3cSChristian Brauner
3056d68dbb0cSChristian Brauner #ifdef __ARCH_WANT_SYS_CLONE3
3057dd499f7aSAmanieu d'Antras
30587f192e3cSChristian Brauner noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
30597f192e3cSChristian Brauner struct clone_args __user *uargs,
3060f14c234bSAleksa Sarai size_t usize)
30617f192e3cSChristian Brauner {
3062f14c234bSAleksa Sarai int err;
30637f192e3cSChristian Brauner struct clone_args args;
306449cb2fc4SAdrian Reber pid_t *kset_tid = kargs->set_tid;
30657f192e3cSChristian Brauner
3066a966dcfeSEugene Syromiatnikov BUILD_BUG_ON(offsetofend(struct clone_args, tls) !=
3067a966dcfeSEugene Syromiatnikov CLONE_ARGS_SIZE_VER0);
3068a966dcfeSEugene Syromiatnikov BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) !=
3069a966dcfeSEugene Syromiatnikov CLONE_ARGS_SIZE_VER1);
3070a966dcfeSEugene Syromiatnikov BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
3071a966dcfeSEugene Syromiatnikov CLONE_ARGS_SIZE_VER2);
3072a966dcfeSEugene Syromiatnikov BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);
3073a966dcfeSEugene Syromiatnikov
3074f14c234bSAleksa Sarai if (unlikely(usize > PAGE_SIZE))
30757f192e3cSChristian Brauner return -E2BIG;
3076f14c234bSAleksa Sarai if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
30777f192e3cSChristian Brauner return -EINVAL;
30787f192e3cSChristian Brauner
3079f14c234bSAleksa Sarai err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
3080f14c234bSAleksa Sarai if (err)
3081f14c234bSAleksa Sarai return err;
30827f192e3cSChristian Brauner
308349cb2fc4SAdrian Reber if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL))
308449cb2fc4SAdrian Reber return -EINVAL;
308549cb2fc4SAdrian Reber
308649cb2fc4SAdrian Reber if (unlikely(!args.set_tid && args.set_tid_size > 0))
308749cb2fc4SAdrian Reber return -EINVAL;
308849cb2fc4SAdrian Reber
308949cb2fc4SAdrian Reber if (unlikely(args.set_tid && args.set_tid_size == 0))
309049cb2fc4SAdrian Reber return -EINVAL;
309149cb2fc4SAdrian Reber
3092a0eb9abdSEugene Syromiatnikov /*
3093a0eb9abdSEugene Syromiatnikov * Verify that the higher 32 bits of exit_signal are unset and that
3094a0eb9abdSEugene Syromiatnikov * it is a valid signal.
3095a0eb9abdSEugene Syromiatnikov */
3096a0eb9abdSEugene Syromiatnikov if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
3097a0eb9abdSEugene Syromiatnikov !valid_signal(args.exit_signal)))
3098a0eb9abdSEugene Syromiatnikov return -EINVAL;
3099a0eb9abdSEugene Syromiatnikov
310062173872SEugene Syromiatnikov if ((args.flags & CLONE_INTO_CGROUP) &&
310162173872SEugene Syromiatnikov (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2))
3102ef2c41cfSChristian Brauner return -EINVAL;
3103ef2c41cfSChristian Brauner
31047f192e3cSChristian Brauner *kargs = (struct kernel_clone_args){
31057f192e3cSChristian Brauner .flags = args.flags,
31067f192e3cSChristian Brauner .pidfd = u64_to_user_ptr(args.pidfd),
31077f192e3cSChristian Brauner .child_tid = u64_to_user_ptr(args.child_tid),
31087f192e3cSChristian Brauner .parent_tid = u64_to_user_ptr(args.parent_tid),
31097f192e3cSChristian Brauner .exit_signal = args.exit_signal,
31107f192e3cSChristian Brauner .stack = args.stack,
31117f192e3cSChristian Brauner .stack_size = args.stack_size,
31127f192e3cSChristian Brauner .tls = args.tls,
311349cb2fc4SAdrian Reber .set_tid_size = args.set_tid_size,
3114ef2c41cfSChristian Brauner .cgroup = args.cgroup,
31157f192e3cSChristian Brauner };
31167f192e3cSChristian Brauner
311749cb2fc4SAdrian Reber if (args.set_tid &&
311849cb2fc4SAdrian Reber copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
311949cb2fc4SAdrian Reber (kargs->set_tid_size * sizeof(pid_t))))
312049cb2fc4SAdrian Reber return -EFAULT;
312149cb2fc4SAdrian Reber
312249cb2fc4SAdrian Reber kargs->set_tid = kset_tid;
312349cb2fc4SAdrian Reber
31247f192e3cSChristian Brauner return 0;
31257f192e3cSChristian Brauner }
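/*
 * Rough sketch (illustrative, simplified) of what the copy_struct_from_user()
 * call above does when userspace passes a smaller, older struct layout; the
 * real helper additionally rejects oversized structs whose trailing bytes are
 * non-zero:
 *
 *	memset(&args, 0, sizeof(args));
 *	if (copy_from_user(&args, uargs, min(usize, sizeof(args))))
 *		return -EFAULT;
 *
 * A caller built against CLONE_ARGS_SIZE_VER0 therefore leaves set_tid,
 * set_tid_size and cgroup at zero, and the VER1/VER2 checks above are
 * effectively no-ops for it.
 */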
31267f192e3cSChristian Brauner
3127fa729c4dSChristian Brauner /**
3128fa729c4dSChristian Brauner * clone3_stack_valid - check and prepare stack
3129fa729c4dSChristian Brauner * @kargs: kernel clone args
3130fa729c4dSChristian Brauner *
3131fa729c4dSChristian Brauner * Verify that the stack arguments userspace gave us are sane.
3132fa729c4dSChristian Brauner * In addition, adjust the stack address for the architecture's stack-growth
3133fa729c4dSChristian Brauner * direction, since that is easier for the kernel than for userspace to determine.
3134fa729c4dSChristian Brauner */
3135fa729c4dSChristian Brauner static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
3136fa729c4dSChristian Brauner {
3137fa729c4dSChristian Brauner if (kargs->stack == 0) {
3138fa729c4dSChristian Brauner if (kargs->stack_size > 0)
3139fa729c4dSChristian Brauner return false;
3140fa729c4dSChristian Brauner } else {
3141fa729c4dSChristian Brauner if (kargs->stack_size == 0)
3142fa729c4dSChristian Brauner return false;
3143fa729c4dSChristian Brauner
3144fa729c4dSChristian Brauner if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
3145fa729c4dSChristian Brauner return false;
3146fa729c4dSChristian Brauner
3147fa729c4dSChristian Brauner #if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64)
3148fa729c4dSChristian Brauner kargs->stack += kargs->stack_size;
3149fa729c4dSChristian Brauner #endif
3150fa729c4dSChristian Brauner }
3151fa729c4dSChristian Brauner
3152fa729c4dSChristian Brauner return true;
3153fa729c4dSChristian Brauner }
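/*
 * Usage note (illustrative, not kernel code): unlike legacy clone(), which
 * expects the topmost address on stack-grows-down architectures, clone3()
 * always takes the lowest address of the stack plus its size; the adjustment
 * above makes the child start with its stack pointer at stack + stack_size.
 *
 *	void *stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
 *	args.stack      = (__u64)(uintptr_t)stack;	// lowest address
 *	args.stack_size = stack_size;
 */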
3154fa729c4dSChristian Brauner
3155fa729c4dSChristian Brauner static bool clone3_args_valid(struct kernel_clone_args *kargs)
31567f192e3cSChristian Brauner {
3157b612e5dfSChristian Brauner /* Verify that no unknown flags are passed along. */
3158ef2c41cfSChristian Brauner if (kargs->flags &
3159ef2c41cfSChristian Brauner ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP))
31607f192e3cSChristian Brauner return false;
31617f192e3cSChristian Brauner
31627f192e3cSChristian Brauner /*
3163a8ca6b13SXiaofeng Cao * - make the CLONE_DETACHED bit reusable for clone3
3164a8ca6b13SXiaofeng Cao * - make the CSIGNAL bits reusable for clone3
31657f192e3cSChristian Brauner */
3166a402f1e3STobias Klauser if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
31677f192e3cSChristian Brauner return false;
31687f192e3cSChristian Brauner
3169b612e5dfSChristian Brauner if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
3170b612e5dfSChristian Brauner (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND))
3171b612e5dfSChristian Brauner return false;
3172b612e5dfSChristian Brauner
31737f192e3cSChristian Brauner if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
31747f192e3cSChristian Brauner kargs->exit_signal)
31757f192e3cSChristian Brauner return false;
31767f192e3cSChristian Brauner
3177fa729c4dSChristian Brauner if (!clone3_stack_valid(kargs))
3178fa729c4dSChristian Brauner return false;
3179fa729c4dSChristian Brauner
31807f192e3cSChristian Brauner return true;
31817f192e3cSChristian Brauner }
31827f192e3cSChristian Brauner
3183501bd016SChristian Brauner /**
3184501bd016SChristian Brauner * clone3 - create a new process with specific properties
3185501bd016SChristian Brauner * @uargs: argument structure
3186501bd016SChristian Brauner * @size: size of @uargs
3187501bd016SChristian Brauner *
3188501bd016SChristian Brauner * clone3() is the extensible successor to clone()/clone2().
3189501bd016SChristian Brauner * It takes a struct as argument that is versioned by its size.
3190501bd016SChristian Brauner *
3191501bd016SChristian Brauner * Return: On success, a positive PID for the child process.
3192501bd016SChristian Brauner * On error, a negative errno number.
3193501bd016SChristian Brauner */
31947f192e3cSChristian Brauner SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
31957f192e3cSChristian Brauner {
31967f192e3cSChristian Brauner int err;
31977f192e3cSChristian Brauner
31987f192e3cSChristian Brauner struct kernel_clone_args kargs;
319949cb2fc4SAdrian Reber pid_t set_tid[MAX_PID_NS_LEVEL];
320049cb2fc4SAdrian Reber
320149cb2fc4SAdrian Reber kargs.set_tid = set_tid;
32027f192e3cSChristian Brauner
32037f192e3cSChristian Brauner err = copy_clone_args_from_user(&kargs, uargs, size);
32047f192e3cSChristian Brauner if (err)
32057f192e3cSChristian Brauner return err;
32067f192e3cSChristian Brauner
32077f192e3cSChristian Brauner if (!clone3_args_valid(&kargs))
32087f192e3cSChristian Brauner return -EINVAL;
32097f192e3cSChristian Brauner
3210cad6967aSChristian Brauner return kernel_clone(&kargs);
3211d2125043SAl Viro }
3212d2125043SAl Viro #endif
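/*
 * Illustrative userspace usage (assumes glibc's syscall() wrapper and a kernel
 * exposing __NR_clone3): clone3() has no libc wrapper, so callers invoke it
 * directly and pass the size of the struct layout they were built against:
 *
 *	int pidfd = -1;
 *	struct clone_args args = {
 *		.flags       = CLONE_PIDFD,
 *		.pidfd       = (__u64)(uintptr_t)&pidfd,
 *		.exit_signal = SIGCHLD,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 *	if (pid == 0) {
 *		// child; with stack == 0 it keeps running on a CoW copy
 *		// of the parent's stack, just like fork()
 *	}
 */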
3213d2125043SAl Viro
32140f1b92cbSOleg Nesterov void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
32150f1b92cbSOleg Nesterov {
32160f1b92cbSOleg Nesterov struct task_struct *leader, *parent, *child;
32170f1b92cbSOleg Nesterov int res;
32180f1b92cbSOleg Nesterov
32190f1b92cbSOleg Nesterov read_lock(&tasklist_lock);
32200f1b92cbSOleg Nesterov leader = top = top->group_leader;
32210f1b92cbSOleg Nesterov down:
32220f1b92cbSOleg Nesterov for_each_thread(leader, parent) {
32230f1b92cbSOleg Nesterov list_for_each_entry(child, &parent->children, sibling) {
32240f1b92cbSOleg Nesterov res = visitor(child, data);
32250f1b92cbSOleg Nesterov if (res) {
32260f1b92cbSOleg Nesterov if (res < 0)
32270f1b92cbSOleg Nesterov goto out;
32280f1b92cbSOleg Nesterov leader = child;
32290f1b92cbSOleg Nesterov goto down;
32300f1b92cbSOleg Nesterov }
32310f1b92cbSOleg Nesterov up:
32320f1b92cbSOleg Nesterov ;
32330f1b92cbSOleg Nesterov }
32340f1b92cbSOleg Nesterov }
32350f1b92cbSOleg Nesterov
32360f1b92cbSOleg Nesterov if (leader != top) {
32370f1b92cbSOleg Nesterov child = leader;
32380f1b92cbSOleg Nesterov parent = child->real_parent;
32390f1b92cbSOleg Nesterov leader = parent->group_leader;
32400f1b92cbSOleg Nesterov goto up;
32410f1b92cbSOleg Nesterov }
32420f1b92cbSOleg Nesterov out:
32430f1b92cbSOleg Nesterov read_unlock(&tasklist_lock);
32440f1b92cbSOleg Nesterov }
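/*
 * Sketch of a visitor (illustrative): walk_process_tree() invokes it for each
 * child under @top; returning 0 moves on to the next sibling, a positive
 * value descends into that child's subtree, and a negative value aborts the
 * walk.
 *
 *	static int count_descendants(struct task_struct *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 1;	// keep descending
 *	}
 *
 *	int n = 0;
 *	walk_process_tree(current, count_descendants, &n);
 */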
32450f1b92cbSOleg Nesterov
32465fd63b30SRavikiran G Thirumalai #ifndef ARCH_MIN_MMSTRUCT_ALIGN
32475fd63b30SRavikiran G Thirumalai #define ARCH_MIN_MMSTRUCT_ALIGN 0
32485fd63b30SRavikiran G Thirumalai #endif
32495fd63b30SRavikiran G Thirumalai
325051cc5068SAlexey Dobriyan static void sighand_ctor(void *data)
3251aa1757f9SOleg Nesterov {
3252aa1757f9SOleg Nesterov struct sighand_struct *sighand = data;
3253aa1757f9SOleg Nesterov
3254aa1757f9SOleg Nesterov spin_lock_init(&sighand->siglock);
3255b8fceee1SDavide Libenzi init_waitqueue_head(&sighand->signalfd_wqh);
3256fba2afaaSDavide Libenzi }
3257aa1757f9SOleg Nesterov
3258af806027SPeter Zijlstra void __init mm_cache_init(void)
32591da177e4SLinus Torvalds {
3260c1a2f7f0SRik van Riel unsigned int mm_size;
3261c1a2f7f0SRik van Riel
3262af806027SPeter Zijlstra /*
3263af806027SPeter Zijlstra * The mm_cpumask is located at the end of mm_struct, and is
3264af806027SPeter Zijlstra * dynamically sized based on the maximum CPU number this system
3265af806027SPeter Zijlstra * can have, taking hotplug into account (nr_cpu_ids).
3266af806027SPeter Zijlstra */
3267af7f588dSMathieu Desnoyers mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();
3268af806027SPeter Zijlstra
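	/*
	 * The usercopy whitelist below (offset and size of saved_auxv) means
	 * hardened usercopy will only allow copy_{to,from}_user() to touch
	 * that field of an mm_struct allocated from this cache.
	 */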
3269af806027SPeter Zijlstra mm_cachep = kmem_cache_create_usercopy("mm_struct",
3270af806027SPeter Zijlstra mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
3271af806027SPeter Zijlstra SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3272af806027SPeter Zijlstra offsetof(struct mm_struct, saved_auxv),
3273af806027SPeter Zijlstra sizeof_field(struct mm_struct, saved_auxv),
3274af806027SPeter Zijlstra NULL);
3275af806027SPeter Zijlstra }
3276af806027SPeter Zijlstra
3277af806027SPeter Zijlstra void __init proc_caches_init(void)
3278af806027SPeter Zijlstra {
32791da177e4SLinus Torvalds sighand_cachep = kmem_cache_create("sighand_cache",
32801da177e4SLinus Torvalds sizeof(struct sighand_struct), 0,
32815f0d5a3aSPaul E. McKenney SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
328275f296d9SLevin, Alexander (Sasha Levin) SLAB_ACCOUNT, sighand_ctor);
32831da177e4SLinus Torvalds signal_cachep = kmem_cache_create("signal_cache",
32841da177e4SLinus Torvalds sizeof(struct signal_struct), 0,
328575f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
32865d097056SVladimir Davydov NULL);
32871da177e4SLinus Torvalds files_cachep = kmem_cache_create("files_cache",
32881da177e4SLinus Torvalds sizeof(struct files_struct), 0,
328975f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
32905d097056SVladimir Davydov NULL);
32911da177e4SLinus Torvalds fs_cachep = kmem_cache_create("fs_cache",
32921da177e4SLinus Torvalds sizeof(struct fs_struct), 0,
329375f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
32945d097056SVladimir Davydov NULL);
3295c1a2f7f0SRik van Riel
32965d097056SVladimir Davydov vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
3297c7f8f31cSSuren Baghdasaryan #ifdef CONFIG_PER_VMA_LOCK
3298c7f8f31cSSuren Baghdasaryan vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT);
3299c7f8f31cSSuren Baghdasaryan #endif
33008feae131SDavid Howells mmap_init();
330166577193SAl Viro nsproxy_cache_init();
33021da177e4SLinus Torvalds }
3303cf2e340fSJANAK DESAI
3304cf2e340fSJANAK DESAI /*
33059bfb23fcSOleg Nesterov * Check constraints on flags passed to the unshare system call.
3306cf2e340fSJANAK DESAI */
33079bfb23fcSOleg Nesterov static int check_unshare_flags(unsigned long unshare_flags)
3308cf2e340fSJANAK DESAI {
33099bfb23fcSOleg Nesterov if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
33109bfb23fcSOleg Nesterov CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
331150804fe3SEric W. Biederman CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
3312769071acSAndrei Vagin CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
3313769071acSAndrei Vagin CLONE_NEWTIME))
3314cf2e340fSJANAK DESAI return -EINVAL;
33159bfb23fcSOleg Nesterov /*
331612c641abSEric W. Biederman * Not implemented, but pretend it works if there is nothing
331712c641abSEric W. Biederman * to unshare. Note that unsharing the address space or the
331812c641abSEric W. Biederman * signal handlers also needs to unshare the signal queues (aka
331912c641abSEric W. Biederman * CLONE_THREAD).
33209bfb23fcSOleg Nesterov */
33219bfb23fcSOleg Nesterov if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
332212c641abSEric W. Biederman if (!thread_group_empty(current))
332312c641abSEric W. Biederman return -EINVAL;
332412c641abSEric W. Biederman }
332512c641abSEric W. Biederman if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
3326d036bda7SElena Reshetova if (refcount_read(¤t->sighand->count) > 1)
332712c641abSEric W. Biederman return -EINVAL;
332812c641abSEric W. Biederman }
332912c641abSEric W. Biederman if (unshare_flags & CLONE_VM) {
333012c641abSEric W. Biederman if (!current_is_single_threaded())
33319bfb23fcSOleg Nesterov return -EINVAL;
33329bfb23fcSOleg Nesterov }
3333cf2e340fSJANAK DESAI
3334cf2e340fSJANAK DESAI return 0;
3335cf2e340fSJANAK DESAI }
3336cf2e340fSJANAK DESAI
3337cf2e340fSJANAK DESAI /*
333899d1419dSJANAK DESAI * Unshare the filesystem structure if it is being shared
3339cf2e340fSJANAK DESAI */
3340cf2e340fSJANAK DESAI static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
3341cf2e340fSJANAK DESAI {
3342cf2e340fSJANAK DESAI struct fs_struct *fs = current->fs;
3343cf2e340fSJANAK DESAI
3344498052bbSAl Viro if (!(unshare_flags & CLONE_FS) || !fs)
3345498052bbSAl Viro return 0;
3346498052bbSAl Viro
3347498052bbSAl Viro /* don't need lock here; in the worst case we'll do useless copy */
3348498052bbSAl Viro if (fs->users == 1)
3349498052bbSAl Viro return 0;
3350498052bbSAl Viro
3351498052bbSAl Viro *new_fsp = copy_fs_struct(fs);
335299d1419dSJANAK DESAI if (!*new_fsp)
335399d1419dSJANAK DESAI return -ENOMEM;
3354cf2e340fSJANAK DESAI
3355cf2e340fSJANAK DESAI return 0;
3356cf2e340fSJANAK DESAI }
3357cf2e340fSJANAK DESAI
3358cf2e340fSJANAK DESAI /*
3359a016f338SJANAK DESAI * Unshare file descriptor table if it is being shared
3360cf2e340fSJANAK DESAI */
3361a8023f8bSAl Viro static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
3362cf2e340fSJANAK DESAI {
3363cf2e340fSJANAK DESAI struct files_struct *fd = current->files;
3364cf2e340fSJANAK DESAI
3365cf2e340fSJANAK DESAI if ((unshare_flags & CLONE_FILES) &&
3366a016f338SJANAK DESAI (fd && atomic_read(&fd->count) > 1)) {
3367a8023f8bSAl Viro fd = dup_fd(fd, NULL);
3368a8023f8bSAl Viro if (IS_ERR(fd))
3369a8023f8bSAl Viro return PTR_ERR(fd);
3370a8023f8bSAl Viro *new_fdp = fd;
3371a016f338SJANAK DESAI }
3372cf2e340fSJANAK DESAI
3373cf2e340fSJANAK DESAI return 0;
3374cf2e340fSJANAK DESAI }
3375cf2e340fSJANAK DESAI
3376cf2e340fSJANAK DESAI /*
3377cf2e340fSJANAK DESAI * unshare allows a process to 'unshare' part of the process
3378cf2e340fSJANAK DESAI * context which was originally shared using clone. copy_*
3379cad6967aSChristian Brauner * functions used by kernel_clone() cannot be used here directly
3380cf2e340fSJANAK DESAI * because they modify an inactive task_struct that is being
3381cf2e340fSJANAK DESAI * constructed. Here we are modifying the current, active
3382cf2e340fSJANAK DESAI * task_struct.
3383cf2e340fSJANAK DESAI */
33849b32105eSDominik Brodowski int ksys_unshare(unsigned long unshare_flags)
3385cf2e340fSJANAK DESAI {
3386cf2e340fSJANAK DESAI struct fs_struct *fs, *new_fs = NULL;
3387ba1f70ddSRan Xiaokai struct files_struct *new_fd = NULL;
3388b2e0d987SEric W. Biederman struct cred *new_cred = NULL;
3389cf7b708cSPavel Emelyanov struct nsproxy *new_nsproxy = NULL;
33909edff4abSManfred Spraul int do_sysvsem = 0;
33919bfb23fcSOleg Nesterov int err;
3392cf2e340fSJANAK DESAI
339350804fe3SEric W. Biederman /*
3394faf00da5SEric W. Biederman * If unsharing a user namespace, we must also unshare the thread group
3395faf00da5SEric W. Biederman * and the filesystem root and working directories.
3396b2e0d987SEric W. Biederman */
3397b2e0d987SEric W. Biederman if (unshare_flags & CLONE_NEWUSER)
3398e66eded8SEric W. Biederman unshare_flags |= CLONE_THREAD | CLONE_FS;
3399b2e0d987SEric W. Biederman /*
340050804fe3SEric W. Biederman * If unsharing the VM, we must also unshare the signal handlers.
340150804fe3SEric W. Biederman */
340250804fe3SEric W. Biederman if (unshare_flags & CLONE_VM)
340350804fe3SEric W. Biederman unshare_flags |= CLONE_SIGHAND;
34046013f67fSManfred Spraul /*
340512c641abSEric W. Biederman * If unsharing signal handlers, we must also unshare the signal queues.
340612c641abSEric W. Biederman */
340712c641abSEric W. Biederman if (unshare_flags & CLONE_SIGHAND)
340812c641abSEric W. Biederman unshare_flags |= CLONE_THREAD;
340912c641abSEric W. Biederman /*
34109bfb23fcSOleg Nesterov * If unsharing the mount namespace, we must also unshare filesystem information.
34119bfb23fcSOleg Nesterov */
34129bfb23fcSOleg Nesterov if (unshare_flags & CLONE_NEWNS)
34139bfb23fcSOleg Nesterov unshare_flags |= CLONE_FS;
341450804fe3SEric W. Biederman
341550804fe3SEric W. Biederman err = check_unshare_flags(unshare_flags);
341650804fe3SEric W. Biederman if (err)
341750804fe3SEric W. Biederman goto bad_unshare_out;
34189bfb23fcSOleg Nesterov /*
34196013f67fSManfred Spraul * CLONE_NEWIPC must also detach from the undolist: after switching
34206013f67fSManfred Spraul * to a new ipc namespace, the semaphore arrays from the old
34216013f67fSManfred Spraul * namespace are unreachable.
34226013f67fSManfred Spraul */
34236013f67fSManfred Spraul if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
34249edff4abSManfred Spraul do_sysvsem = 1;
3425fb0a685cSDaniel Rebelo de Oliveira err = unshare_fs(unshare_flags, &new_fs);
3426fb0a685cSDaniel Rebelo de Oliveira if (err)
34279bfb23fcSOleg Nesterov goto bad_unshare_out;
3428a8023f8bSAl Viro err = unshare_fd(unshare_flags, &new_fd);
3429fb0a685cSDaniel Rebelo de Oliveira if (err)
34309bfb23fcSOleg Nesterov goto bad_unshare_cleanup_fs;
3431b2e0d987SEric W. Biederman err = unshare_userns(unshare_flags, &new_cred);
3432fb0a685cSDaniel Rebelo de Oliveira if (err)
34339edff4abSManfred Spraul goto bad_unshare_cleanup_fd;
3434b2e0d987SEric W. Biederman err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
3435b2e0d987SEric W. Biederman new_cred, new_fs);
3436b2e0d987SEric W. Biederman if (err)
3437b2e0d987SEric W. Biederman goto bad_unshare_cleanup_cred;
3438cf2e340fSJANAK DESAI
3439905ae01cSAlexey Gladkov if (new_cred) {
3440905ae01cSAlexey Gladkov err = set_cred_ucounts(new_cred);
3441905ae01cSAlexey Gladkov if (err)
3442905ae01cSAlexey Gladkov goto bad_unshare_cleanup_cred;
3443905ae01cSAlexey Gladkov }
3444905ae01cSAlexey Gladkov
3445b2e0d987SEric W. Biederman if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
34469edff4abSManfred Spraul if (do_sysvsem) {
34479edff4abSManfred Spraul /*
34489edff4abSManfred Spraul * For the SysV semaphore undo lists, CLONE_SYSVSEM is equivalent to sys_exit().
34499edff4abSManfred Spraul */
34509edff4abSManfred Spraul exit_sem(current);
34519edff4abSManfred Spraul }
3452ab602f79SJack Miller if (unshare_flags & CLONE_NEWIPC) {
3453ab602f79SJack Miller /* Orphan segments in old ns (see sem above). */
3454ab602f79SJack Miller exit_shm(current);
3455ab602f79SJack Miller shm_init_task(current);
3456ab602f79SJack Miller }
3457ab516013SSerge E. Hallyn
34586f977e6bSAlan Cox if (new_nsproxy)
3459cf7b708cSPavel Emelyanov switch_task_namespaces(current, new_nsproxy);
3460cf2e340fSJANAK DESAI
3461cf7b708cSPavel Emelyanov task_lock(current);
3462cf7b708cSPavel Emelyanov
3463cf2e340fSJANAK DESAI if (new_fs) {
3464cf2e340fSJANAK DESAI fs = current->fs;
34652a4419b5SNick Piggin spin_lock(&fs->lock);
3466cf2e340fSJANAK DESAI current->fs = new_fs;
3467498052bbSAl Viro if (--fs->users)
3468498052bbSAl Viro new_fs = NULL;
3469498052bbSAl Viro else
3470cf2e340fSJANAK DESAI new_fs = fs;
34712a4419b5SNick Piggin spin_unlock(&fs->lock);
3472cf2e340fSJANAK DESAI }
3473cf2e340fSJANAK DESAI
3474ba1f70ddSRan Xiaokai if (new_fd)
3475ba1f70ddSRan Xiaokai swap(current->files, new_fd);
3476cf2e340fSJANAK DESAI
3477cf2e340fSJANAK DESAI task_unlock(current);
3478b2e0d987SEric W. Biederman
3479b2e0d987SEric W. Biederman if (new_cred) {
3480b2e0d987SEric W. Biederman /* Install the new user namespace */
3481b2e0d987SEric W. Biederman commit_creds(new_cred);
3482b2e0d987SEric W. Biederman new_cred = NULL;
3483b2e0d987SEric W. Biederman }
3484cf2e340fSJANAK DESAI }
3485cf2e340fSJANAK DESAI
3486e4222673SHari Bathini perf_event_namespaces(current);
3487e4222673SHari Bathini
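	/*
	 * On success we deliberately fall through the cleanup labels below:
	 * new_cred is NULL by now, new_fd points at the *old* files_struct
	 * (if one was swapped in) and new_fs at the old fs_struct (only if we
	 * were its last user), so the puts below release exactly the
	 * references that are no longer needed.
	 */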
3488b2e0d987SEric W. Biederman bad_unshare_cleanup_cred:
3489b2e0d987SEric W. Biederman if (new_cred)
3490b2e0d987SEric W. Biederman put_cred(new_cred);
3491cf2e340fSJANAK DESAI bad_unshare_cleanup_fd:
3492cf2e340fSJANAK DESAI if (new_fd)
3493cf2e340fSJANAK DESAI put_files_struct(new_fd);
3494cf2e340fSJANAK DESAI
3495cf2e340fSJANAK DESAI bad_unshare_cleanup_fs:
3496cf2e340fSJANAK DESAI if (new_fs)
3497498052bbSAl Viro free_fs_struct(new_fs);
3498cf2e340fSJANAK DESAI
3499cf2e340fSJANAK DESAI bad_unshare_out:
3500cf2e340fSJANAK DESAI return err;
3501cf2e340fSJANAK DESAI }
35023b125388SAl Viro
35039b32105eSDominik Brodowski SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
35049b32105eSDominik Brodowski {
35059b32105eSDominik Brodowski return ksys_unshare(unshare_flags);
35069b32105eSDominik Brodowski }
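/*
 * Illustrative userspace usage (not kernel code): a process asking for a
 * private mount namespace so its later mounts stay invisible to peers. As
 * the implication chain in ksys_unshare() shows, CLONE_NEWNS pulls in
 * CLONE_FS; the call needs CAP_SYS_ADMIN, or can be combined with
 * CLONE_NEWUSER to gain it in a fresh user namespace:
 *
 *	if (unshare(CLONE_NEWUSER | CLONE_NEWNS) == -1)
 *		err(1, "unshare");
 *	// mounts from here on are private to this mount namespace
 */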
35079b32105eSDominik Brodowski
35083b125388SAl Viro /*
35093b125388SAl Viro * Helper to unshare the files of the current task.
35103b125388SAl Viro * We don't want to expose copy_files internals to
35113b125388SAl Viro * the exec layer of the kernel.
35123b125388SAl Viro */
35133b125388SAl Viro
35141f702603SEric W. Biederman int unshare_files(void)
35153b125388SAl Viro {
35163b125388SAl Viro struct task_struct *task = current;
35171f702603SEric W. Biederman struct files_struct *old, *copy = NULL;
35183b125388SAl Viro int error;
35193b125388SAl Viro
3520a8023f8bSAl Viro error = unshare_fd(CLONE_FILES, ©);
35211f702603SEric W. Biederman if (error || !copy)
35223b125388SAl Viro return error;
35231f702603SEric W. Biederman
35241f702603SEric W. Biederman old = task->files;
35253b125388SAl Viro task_lock(task);
35263b125388SAl Viro task->files = copy;
35273b125388SAl Viro task_unlock(task);
35281f702603SEric W. Biederman put_files_struct(old);
35293b125388SAl Viro return 0;
35303b125388SAl Viro }
353116db3d3fSHeinrich Schuchardt
353216db3d3fSHeinrich Schuchardt int sysctl_max_threads(struct ctl_table *table, int write,
3533b0daa2c7STobias Klauser void *buffer, size_t *lenp, loff_t *ppos)
353416db3d3fSHeinrich Schuchardt {
353516db3d3fSHeinrich Schuchardt struct ctl_table t;
353616db3d3fSHeinrich Schuchardt int ret;
353716db3d3fSHeinrich Schuchardt int threads = max_threads;
3538b0f53dbcSMichal Hocko int min = 1;
353916db3d3fSHeinrich Schuchardt int max = MAX_THREADS;
354016db3d3fSHeinrich Schuchardt
354116db3d3fSHeinrich Schuchardt t = *table;
354216db3d3fSHeinrich Schuchardt t.data = &threads;
354316db3d3fSHeinrich Schuchardt t.extra1 = &min;
354416db3d3fSHeinrich Schuchardt t.extra2 = &max;
354516db3d3fSHeinrich Schuchardt
354616db3d3fSHeinrich Schuchardt ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
354716db3d3fSHeinrich Schuchardt if (ret || !write)
354816db3d3fSHeinrich Schuchardt return ret;
354916db3d3fSHeinrich Schuchardt
3550b0f53dbcSMichal Hocko max_threads = threads;
355116db3d3fSHeinrich Schuchardt
355216db3d3fSHeinrich Schuchardt return 0;
355316db3d3fSHeinrich Schuchardt }
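/*
 * Illustrative tuning (not kernel code): this handler is what the
 * kernel.threads-max sysctl uses; writes are clamped to [1, MAX_THREADS]
 * by the min/max bounds above, e.g.:
 *
 *	# sysctl kernel.threads-max=65536
 */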
3554