// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>
#include <linux/sched/mm.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK
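
/*
 * The hard cap matters because the futex ABI stores a task's TID in the
 * lower bits of a futex word (robust and PI futexes); every TID must
 * therefore fit within FUTEX_TID_MASK.
 */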

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

static int max_threads;		/* tunable limit on nr_threads */

#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
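
/*
 * Sum the per-CPU fork counters. This walks all possible CPUs, not just
 * online ones, so forks counted on a CPU that has since been offlined
 * are not lost.
 */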
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#  ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

struct vm_stack {
	struct rcu_head rcu;
	struct vm_struct *stack_vm_area;
};

static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
{
	unsigned int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
			continue;
		return true;
	}
	return false;
}

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);

	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
		return;

	vfree(vm_stack);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct vm_stack *vm_stack = tsk->stack;

	vm_stack->stack_vm_area = tsk->stack_vm_area;
	call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
}

static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}
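
/*
 * Cached vmap stacks may be recycled by tasks in different memory
 * cgroups, so stack pages are charged to the memcg explicitly each time
 * a stack is assigned (below) and uncharged when the task releases it
 * (exit_task_stack_account()), instead of relying on __GFP_ACCOUNT.
 */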
static int memcg_charge_kernel_stack(struct vm_struct *vm)
{
	int i;
	int ret;

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
		if (ret)
			goto err;
	}
	return 0;
err:
	/*
	 * If memcg_kmem_charge_page() fails, page's memory cgroup pointer is
	 * NULL, and memcg_kmem_uncharge_page() in free_thread_stack() will
	 * ignore this page.
	 */
	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
		memcg_kmem_uncharge_page(vm->pages[i], 0);
	return ret;
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct vm_struct *vm;
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;

		/* Reset stack metadata. */
		kasan_unpoison_range(s->addr, THREAD_SIZE);

		stack = kasan_reset_tag(s->addr);

		/* Clear stale pointers from reused stack. */
		memset(stack, 0, THREAD_SIZE);

		if (memcg_charge_kernel_stack(s)) {
			vfree(s->addr);
			return -ENOMEM;
		}

		tsk->stack_vm_area = s;
		tsk->stack = stack;
		return 0;
	}

	/*
	 * Allocated stacks are cached and later reused by new threads,
	 * so memcg accounting is performed manually on assigning/releasing
	 * stacks to tasks. Drop __GFP_ACCOUNT.
	 */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));
	if (!stack)
		return -ENOMEM;

	vm = find_vm_area(stack);
	if (memcg_charge_kernel_stack(vm)) {
		vfree(stack);
		return -ENOMEM;
	}
	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	tsk->stack_vm_area = vm;
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return 0;
}

static void free_thread_stack(struct task_struct *tsk)
{
	if (!try_release_thread_stack_to_cache(tsk->stack_vm_area))
		thread_stack_delayed_free(tsk);

	tsk->stack = NULL;
	tsk->stack_vm_area = NULL;
}

#  else /* !CONFIG_VMAP_STACK */

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	__free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		tsk->stack = kasan_reset_tag(page_address(page));
		return 0;
	}
	return -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

#  endif /* CONFIG_VMAP_STACK */
# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */

static struct kmem_cache *thread_stack_cache;

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	kmem_cache_free(thread_stack_cache, rh);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;
	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
					THREAD_SIZE, THREAD_SIZE, 0, 0,
					THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}

# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;

	stack = arch_alloc_thread_stack_node(tsk, node);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	arch_free_thread_stack(tsk);
	tsk->stack = NULL;
}

#endif /* !CONFIG_ARCH_THREAD_STACK_ALLOCATOR */

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma)
		vma_init(vma, mm);
	return vma;
}

struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (new) {
		ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
		ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
		/*
		 * orig->shared.rb may be modified concurrently, but the clone
		 * will be reinitialized.
		 */
		*new = data_race(*orig);
		INIT_LIST_HEAD(&new->anon_vma_chain);
		new->vm_next = new->vm_prev = NULL;
		dup_anon_vma_name(orig, new);
	}
	return new;
}

void vm_area_free(struct vm_area_struct *vma)
{
	free_anon_vma_name(vma);
	kmem_cache_free(vm_area_cachep, vma);
}
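
/*
 * Account kernel stack usage in NR_KERNEL_STACK_KB. Vmapped stacks are
 * accounted page by page against each page's lruvec, since the pages may
 * come from anywhere; otherwise the stack is physically contiguous on
 * one node and a single adjustment of THREAD_SIZE / 1024 suffices.
 */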
static void account_kernel_stack(struct task_struct *tsk, int account)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm = task_stack_vm_area(tsk);
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
					      account * (PAGE_SIZE / 1024));
	} else {
		void *stack = task_stack_page(tsk);

		/* All stack pages are in the same node. */
		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	}
}

void exit_task_stack_account(struct task_struct *tsk)
{
	account_kernel_stack(tsk, -1);

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm;
		int i;

		vm = task_stack_vm_area(tsk);
		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			memcg_kmem_uncharge_page(vm->pages[i], 0);
	}
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	free_thread_stack(tsk);
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
	release_user_cpus_ptr(tsk);
	scs_release(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct file *exe_file;

	exe_file = get_mm_exe_file(oldmm);
	RCU_INIT_POINTER(mm->exe_file, exe_file);
	/*
	 * We depend on the oldmm having properly denied write access to the
	 * exe_file already.
	 */
	if (exe_file && deny_write_access(exe_file))
		pr_warn_once("deny_write_access() failed in %s\n", __func__);
}
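
/*
 * dup_mmap() below copies the parent's VMAs into the child, linking each
 * copy into the legacy rbtree/list and into the maple tree. The maple
 * tree nodes are preallocated via mas_expected_entries() so the stores
 * performed while holding the locks need not allocate.
 */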
#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	LIST_HEAD(uf);
	MA_STATE(mas, &mm->mm_mt, 0, 0);

	uprobe_start_dup_mmap();
	if (mmap_write_lock_killable(oldmm)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	dup_mm_exe_file(mm, oldmm);

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	khugepaged_fork(mm, oldmm);

	retval = mas_expected_entries(&mas, oldmm->map_count);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto loop_out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
		file = tmp->vm_file;
		if (file) {
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		/* Link the vma into the MT */
		mas.index = tmp->vm_start;
		mas.last = tmp->vm_end - 1;
		mas_store(&mas, tmp);

		mm->map_count++;
		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto loop_out;
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
loop_out:
	mas_destroy(&mas);
out:
	mmap_write_unlock(mm);
	flush_tlb_mm(oldmm);
	mmap_write_unlock(oldmm);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto loop_out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mmap_write_lock(oldmm);
	dup_mm_exe_file(mm, oldmm);
	mmap_write_unlock(oldmm);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
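
/*
 * Sanity-check an mm at teardown: any non-zero RSS counter or leftover
 * page-table bytes indicates a counting bug (or a leak) somewhere in the
 * memory management of the exiting process; the state is only reported,
 * not repaired.
 */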
static void check_mm(struct mm_struct *mm)
{
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
			 "Please make sure 'struct resident_page_types[]' is updated as well");

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);
	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_subscriptions_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	mm_pasid_drop(mm);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (refcount_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(refcount_read(&tsk->usage));
	WARN_ON(tsk == current);

	io_uring_free(tsk);
	cgroup_free(tsk);
	task_numa_free(tsk, true);
	security_task_free(tsk);
	bpf_task_storage_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);
	sched_core_free(tsk);
	free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }
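
/*
 * Default thread limit: the divisor of THREAD_SIZE * 8 below means that,
 * at most, one eighth of RAM could be consumed by thread stacks
 * (nr_pages * PAGE_SIZE / (THREAD_SIZE * 8) threads), clamped to the
 * [MIN_THREADS, MAX_THREADS] range.
 */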

/*
 * set_max_threads
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;
	unsigned long nr_pages = totalram_pages();

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* Fetch thread_struct whitelist for the architecture. */
	arch_thread_struct_whitelist(offset, size);

	/*
	 * Handle zero-sized whitelist or empty thread_struct, otherwise
	 * adjust offset to position of thread_struct in task_struct.
	 */
	if (unlikely(*size == 0))
		*offset = 0;
	else
		*offset += offsetof(struct task_struct, thread);
}
#endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */

void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
	unsigned long useroffset, usersize;

	/* create a slab on which task_structs can be allocated */
	task_struct_whitelist(&useroffset, &usersize);
	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_ACCOUNT,
			useroffset, usersize, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++)
		init_user_ns.ucount_max[i] = max_threads/2;

	set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC,      RLIM_INFINITY);
	set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE,   RLIM_INFINITY);
	set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
	set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK,    RLIM_INFINITY);

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	scs_init();

	lockdep_init_task(&init_task);
	uprobes_init();
}

int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}
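
/*
 * dup_task_struct() produces the child's task_struct: a byte copy of the
 * parent fixed up with private resources (kernel stack, shadow call
 * stack, stack canary). The remaining state is reset by the rest of the
 * fork path.
 */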
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto free_tsk;

	err = alloc_thread_stack_node(tsk, node);
	if (err)
		goto free_tsk;

#ifdef CONFIG_THREAD_INFO_IN_TASK
	refcount_set(&tsk->stack_refcount, 1);
#endif
	account_kernel_stack(tsk, 1);

	err = scs_prepare(tsk, node);
	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);
	clear_syscall_work_syscall_user_dispatch(tsk);

#ifdef CONFIG_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif
	if (orig->cpus_ptr == &orig->cpus_mask)
		tsk->cpus_ptr = &tsk->cpus_mask;
	dup_user_cpus_ptr(tsk, orig, node);

	/*
	 * One for the user space visible state that goes away when reaped.
	 * One for the scheduler.
	 */
	refcount_set(&tsk->rcu_users, 2);
	/* One for the rcu users */
	refcount_set(&tsk->usage, 1);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;
	tsk->worker_private = NULL;

	kcov_task_init(tsk);
	kmap_local_fork(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

#ifdef CONFIG_BLK_CGROUP
	tsk->throttle_queue = NULL;
	tsk->use_memdelay = 0;
#endif

#ifdef CONFIG_IOMMU_SVA
	tsk->pasid_activated = 0;
#endif

#ifdef CONFIG_MEMCG
	tsk->active_memcg = NULL;
#endif

#ifdef CONFIG_CPU_SUP_INTEL
	tsk->reported_split_lock = 0;
#endif

	return tsk;

free_stack:
	exit_task_stack_account(tsk);
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static __always_inline void mm_clear_owner(struct mm_struct *mm,
					   struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	if (mm->owner == p)
		WRITE_ONCE(mm->owner, NULL);
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
	mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	seqcount_init(&mm->write_protect_seq);
	mmap_init_lock(mm);
	INIT_LIST_HEAD(&mm->mmlist);
Shutemov mm_pgtables_bytes_init(mm); 113541f727fdSVladimir Davydov mm->map_count = 0; 113641f727fdSVladimir Davydov mm->locked_vm = 0; 113770f8a3caSDavidlohr Bueso atomic64_set(&mm->pinned_vm, 0); 1138d559db08SKAMEZAWA Hiroyuki memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); 11391da177e4SLinus Torvalds spin_lock_init(&mm->page_table_lock); 114088aa7cc6SYang Shi spin_lock_init(&mm->arg_lock); 114141f727fdSVladimir Davydov mm_init_cpumask(mm); 1142858f0993SAlexey Dobriyan mm_init_aio(mm); 1143cf475ad2SBalbir Singh mm_init_owner(mm, p); 1144a6cbd440SFenghua Yu mm_pasid_init(mm); 11452b7e8665SEric Biggers RCU_INIT_POINTER(mm->exe_file, NULL); 1146984cfe4eSJason Gunthorpe mmu_notifier_subscriptions_init(mm); 114716af97dcSNadav Amit init_tlb_flush_pending(mm); 114841f727fdSVladimir Davydov #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 114941f727fdSVladimir Davydov mm->pmd_huge_pte = NULL; 115041f727fdSVladimir Davydov #endif 1151355627f5SEric Biggers mm_init_uprobes_state(mm); 115213db8c50SLiu Zixian hugetlb_count_init(mm); 11531da177e4SLinus Torvalds 1154a0715cc2SAlex Thorlton if (current->mm) { 1155a0715cc2SAlex Thorlton mm->flags = current->mm->flags & MMF_INIT_MASK; 1156a0715cc2SAlex Thorlton mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; 1157a0715cc2SAlex Thorlton } else { 1158a0715cc2SAlex Thorlton mm->flags = default_dump_filter; 11591da177e4SLinus Torvalds mm->def_flags = 0; 1160a0715cc2SAlex Thorlton } 1161a0715cc2SAlex Thorlton 116241f727fdSVladimir Davydov if (mm_alloc_pgd(mm)) 116341f727fdSVladimir Davydov goto fail_nopgd; 116478fb7466SPavel Emelianov 116541f727fdSVladimir Davydov if (init_new_context(p, mm)) 116641f727fdSVladimir Davydov goto fail_nocontext; 116741f727fdSVladimir Davydov 1168bfedb589SEric W. Biederman mm->user_ns = get_user_ns(user_ns); 1169bd74fdaeSYu Zhao lru_gen_init_mm(mm); 117041f727fdSVladimir Davydov return mm; 117141f727fdSVladimir Davydov 117241f727fdSVladimir Davydov fail_nocontext: 117341f727fdSVladimir Davydov mm_free_pgd(mm); 117441f727fdSVladimir Davydov fail_nopgd: 11751da177e4SLinus Torvalds free_mm(mm); 11761da177e4SLinus Torvalds return NULL; 11771da177e4SLinus Torvalds } 11781da177e4SLinus Torvalds 11791da177e4SLinus Torvalds /* 11801da177e4SLinus Torvalds * Allocate and initialize an mm_struct. 11811da177e4SLinus Torvalds */ 11821da177e4SLinus Torvalds struct mm_struct *mm_alloc(void) 11831da177e4SLinus Torvalds { 11841da177e4SLinus Torvalds struct mm_struct *mm; 11851da177e4SLinus Torvalds 11861da177e4SLinus Torvalds mm = allocate_mm(); 1187de03c72cSKOSAKI Motohiro if (!mm) 1188de03c72cSKOSAKI Motohiro return NULL; 1189de03c72cSKOSAKI Motohiro 11901da177e4SLinus Torvalds memset(mm, 0, sizeof(*mm)); 1191bfedb589SEric W. 
Biederman 	return mm_init(mm, current, current_user_ns());
11921da177e4SLinus Torvalds }
11931da177e4SLinus Torvalds
1194ec8d7c14SMichal Hocko static inline void __mmput(struct mm_struct *mm)
11951da177e4SLinus Torvalds {
1196ec8d7c14SMichal Hocko 	VM_BUG_ON(atomic_read(&mm->mm_users));
11970ae26f1bSAndrew Morton
1198d4b3b638SSrikar Dronamraju 	uprobe_clear_state(mm);
11991da177e4SLinus Torvalds 	exit_aio(mm);
12001c2fb7a4SAndrea Arcangeli 	ksm_exit(mm);
1201ba76149fSAndrea Arcangeli 	khugepaged_exit(mm); /* must run before exit_mmap */
12021da177e4SLinus Torvalds 	exit_mmap(mm);
12036fcb52a5SAaron Lu 	mm_put_huge_zero_page(mm);
1204925d1c40SMatt Helsley 	set_mm_exe_file(mm, NULL);
12051da177e4SLinus Torvalds 	if (!list_empty(&mm->mmlist)) {
12061da177e4SLinus Torvalds 		spin_lock(&mmlist_lock);
12071da177e4SLinus Torvalds 		list_del(&mm->mmlist);
12081da177e4SLinus Torvalds 		spin_unlock(&mmlist_lock);
12091da177e4SLinus Torvalds 	}
1210801460d0SHiroshi Shimamoto 	if (mm->binfmt)
1211801460d0SHiroshi Shimamoto 		module_put(mm->binfmt->module);
1212bd74fdaeSYu Zhao 	lru_gen_del_mm(mm);
12131da177e4SLinus Torvalds 	mmdrop(mm);
12141da177e4SLinus Torvalds }
1215ec8d7c14SMichal Hocko
1216ec8d7c14SMichal Hocko /*
1217ec8d7c14SMichal Hocko  * Decrement the use count and release all resources for an mm.
1218ec8d7c14SMichal Hocko  */
1219ec8d7c14SMichal Hocko void mmput(struct mm_struct *mm)
1220ec8d7c14SMichal Hocko {
1221ec8d7c14SMichal Hocko 	might_sleep();
1222ec8d7c14SMichal Hocko
1223ec8d7c14SMichal Hocko 	if (atomic_dec_and_test(&mm->mm_users))
1224ec8d7c14SMichal Hocko 		__mmput(mm);
12251da177e4SLinus Torvalds }
12261da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(mmput);
12271da177e4SLinus Torvalds
1228a1b2289cSSherry Yang #ifdef CONFIG_MMU
1229a1b2289cSSherry Yang static void mmput_async_fn(struct work_struct *work)
1230a1b2289cSSherry Yang {
1231a1b2289cSSherry Yang 	struct mm_struct *mm = container_of(work, struct mm_struct,
1232a1b2289cSSherry Yang 					    async_put_work);
1233a1b2289cSSherry Yang
1234a1b2289cSSherry Yang 	__mmput(mm);
1235a1b2289cSSherry Yang }
1236a1b2289cSSherry Yang
1237a1b2289cSSherry Yang void mmput_async(struct mm_struct *mm)
1238a1b2289cSSherry Yang {
1239a1b2289cSSherry Yang 	if (atomic_dec_and_test(&mm->mm_users)) {
1240a1b2289cSSherry Yang 		INIT_WORK(&mm->async_put_work, mmput_async_fn);
1241a1b2289cSSherry Yang 		schedule_work(&mm->async_put_work);
1242a1b2289cSSherry Yang 	}
1243a1b2289cSSherry Yang }
1244a1b2289cSSherry Yang #endif
1245a1b2289cSSherry Yang
124690f31d0eSKonstantin Khlebnikov /**
124790f31d0eSKonstantin Khlebnikov  * set_mm_exe_file - change a reference to the mm's executable file
124890f31d0eSKonstantin Khlebnikov  *
124990f31d0eSKonstantin Khlebnikov  * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
125090f31d0eSKonstantin Khlebnikov  *
12516e399cd1SDavidlohr Bueso  * Main users are mmput() and sys_execve(). Callers prevent concurrent
12526e399cd1SDavidlohr Bueso  * invocations: in mmput() nobody alive is left, while in execve the
125335d7bdc8SDavid Hildenbrand  * task is single-threaded.
1254fe69d560SDavid Hildenbrand  *
1255fe69d560SDavid Hildenbrand  * Can only fail if new_exe_file != NULL.
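 *
 * A minimal caller sketch (illustrative only; the exec path is the
 * canonical user, and bprm->mm / bprm->file stand in for whatever mm
 * and file the caller has prepared):
 *
 *	err = set_mm_exe_file(bprm->mm, bprm->file);
 *	if (err)
 *		return err;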
125690f31d0eSKonstantin Khlebnikov  */
1257fe69d560SDavid Hildenbrand int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
125838646013SJiri Slaby {
12596e399cd1SDavidlohr Bueso 	struct file *old_exe_file;
12606e399cd1SDavidlohr Bueso
12616e399cd1SDavidlohr Bueso 	/*
12626e399cd1SDavidlohr Bueso 	 * It is safe to dereference the exe_file without RCU as
12636e399cd1SDavidlohr Bueso 	 * this function is only called if nobody else can access
12646e399cd1SDavidlohr Bueso 	 * this mm -- see comment above for justification.
12656e399cd1SDavidlohr Bueso 	 */
12666e399cd1SDavidlohr Bueso 	old_exe_file = rcu_dereference_raw(mm->exe_file);
126790f31d0eSKonstantin Khlebnikov
1268fe69d560SDavid Hildenbrand 	if (new_exe_file) {
1269fe69d560SDavid Hildenbrand 		/*
1270fe69d560SDavid Hildenbrand 		 * We expect the caller (i.e., sys_execve) to have already
1271fe69d560SDavid Hildenbrand 		 * denied write access, so this is unlikely to fail.
1272fe69d560SDavid Hildenbrand 		 */
1273fe69d560SDavid Hildenbrand 		if (unlikely(deny_write_access(new_exe_file)))
1274fe69d560SDavid Hildenbrand 			return -EACCES;
127538646013SJiri Slaby 		get_file(new_exe_file);
1276fe69d560SDavid Hildenbrand 	}
127790f31d0eSKonstantin Khlebnikov 	rcu_assign_pointer(mm->exe_file, new_exe_file);
1278fe69d560SDavid Hildenbrand 	if (old_exe_file) {
1279fe69d560SDavid Hildenbrand 		allow_write_access(old_exe_file);
128090f31d0eSKonstantin Khlebnikov 		fput(old_exe_file);
128138646013SJiri Slaby 	}
1282fe69d560SDavid Hildenbrand 	return 0;
1283fe69d560SDavid Hildenbrand }
128438646013SJiri Slaby
128590f31d0eSKonstantin Khlebnikov /**
128635d7bdc8SDavid Hildenbrand  * replace_mm_exe_file - replace a reference to the mm's executable file
128735d7bdc8SDavid Hildenbrand  *
128835d7bdc8SDavid Hildenbrand  * This changes mm's executable file (shown as symlink /proc/[pid]/exe),
128935d7bdc8SDavid Hildenbrand  * dealing with concurrent invocation and without grabbing the mmap lock in
129035d7bdc8SDavid Hildenbrand  * write mode.
129135d7bdc8SDavid Hildenbrand  *
129235d7bdc8SDavid Hildenbrand  * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE).
129335d7bdc8SDavid Hildenbrand  */
129435d7bdc8SDavid Hildenbrand int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
129535d7bdc8SDavid Hildenbrand {
129635d7bdc8SDavid Hildenbrand 	struct vm_area_struct *vma;
129735d7bdc8SDavid Hildenbrand 	struct file *old_exe_file;
129835d7bdc8SDavid Hildenbrand 	int ret = 0;
129935d7bdc8SDavid Hildenbrand
130035d7bdc8SDavid Hildenbrand 	/* Forbid mm->exe_file change if old file still mapped.
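	 * E.g. prctl(PR_SET_MM, PR_SET_MM_EXE_FILE, fd, 0, 0) -- the
	 * checkpoint/restore use case -- is expected to get -EBUSY back
	 * from here for as long as any mapping of the old exe file
	 * remains (an illustrative userspace view, not a statement
	 * about one specific caller).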
*/ 130135d7bdc8SDavid Hildenbrand old_exe_file = get_mm_exe_file(mm); 130235d7bdc8SDavid Hildenbrand if (old_exe_file) { 130335d7bdc8SDavid Hildenbrand mmap_read_lock(mm); 130435d7bdc8SDavid Hildenbrand for (vma = mm->mmap; vma && !ret; vma = vma->vm_next) { 130535d7bdc8SDavid Hildenbrand if (!vma->vm_file) 130635d7bdc8SDavid Hildenbrand continue; 130735d7bdc8SDavid Hildenbrand if (path_equal(&vma->vm_file->f_path, 130835d7bdc8SDavid Hildenbrand &old_exe_file->f_path)) 130935d7bdc8SDavid Hildenbrand ret = -EBUSY; 131035d7bdc8SDavid Hildenbrand } 131135d7bdc8SDavid Hildenbrand mmap_read_unlock(mm); 131235d7bdc8SDavid Hildenbrand fput(old_exe_file); 131335d7bdc8SDavid Hildenbrand if (ret) 131435d7bdc8SDavid Hildenbrand return ret; 131535d7bdc8SDavid Hildenbrand } 131635d7bdc8SDavid Hildenbrand 131735d7bdc8SDavid Hildenbrand /* set the new file, lockless */ 1318fe69d560SDavid Hildenbrand ret = deny_write_access(new_exe_file); 1319fe69d560SDavid Hildenbrand if (ret) 1320fe69d560SDavid Hildenbrand return -EACCES; 132135d7bdc8SDavid Hildenbrand get_file(new_exe_file); 1322fe69d560SDavid Hildenbrand 132335d7bdc8SDavid Hildenbrand old_exe_file = xchg(&mm->exe_file, new_exe_file); 1324fe69d560SDavid Hildenbrand if (old_exe_file) { 1325fe69d560SDavid Hildenbrand /* 1326fe69d560SDavid Hildenbrand * Don't race with dup_mmap() getting the file and disallowing 1327fe69d560SDavid Hildenbrand * write access while someone might open the file writable. 1328fe69d560SDavid Hildenbrand */ 1329fe69d560SDavid Hildenbrand mmap_read_lock(mm); 1330fe69d560SDavid Hildenbrand allow_write_access(old_exe_file); 133135d7bdc8SDavid Hildenbrand fput(old_exe_file); 1332fe69d560SDavid Hildenbrand mmap_read_unlock(mm); 1333fe69d560SDavid Hildenbrand } 133435d7bdc8SDavid Hildenbrand return 0; 133535d7bdc8SDavid Hildenbrand } 133638646013SJiri Slaby 133790f31d0eSKonstantin Khlebnikov /** 133890f31d0eSKonstantin Khlebnikov * get_mm_exe_file - acquire a reference to the mm's executable file 133990f31d0eSKonstantin Khlebnikov * 134090f31d0eSKonstantin Khlebnikov * Returns %NULL if mm has no associated executable file. 134190f31d0eSKonstantin Khlebnikov * User must release file via fput(). 134290f31d0eSKonstantin Khlebnikov */ 134338646013SJiri Slaby struct file *get_mm_exe_file(struct mm_struct *mm) 134438646013SJiri Slaby { 134538646013SJiri Slaby struct file *exe_file; 134638646013SJiri Slaby 134790f31d0eSKonstantin Khlebnikov rcu_read_lock(); 134890f31d0eSKonstantin Khlebnikov exe_file = rcu_dereference(mm->exe_file); 134990f31d0eSKonstantin Khlebnikov if (exe_file && !get_file_rcu(exe_file)) 135090f31d0eSKonstantin Khlebnikov exe_file = NULL; 135190f31d0eSKonstantin Khlebnikov rcu_read_unlock(); 135238646013SJiri Slaby return exe_file; 135338646013SJiri Slaby } 135438646013SJiri Slaby 13551da177e4SLinus Torvalds /** 1356cd81a917SMateusz Guzik * get_task_exe_file - acquire a reference to the task's executable file 1357cd81a917SMateusz Guzik * 1358cd81a917SMateusz Guzik * Returns %NULL if task's mm (if any) has no associated executable file or 1359cd81a917SMateusz Guzik * this is a kernel thread with borrowed mm (see the comment above get_task_mm). 1360cd81a917SMateusz Guzik * User must release file via fput(). 
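 *
 * Typical use is a short borrow-and-release; a sketch (the audit
 * logging and the ab buffer are illustrative, not implied by this
 * helper):
 *
 *	struct file *exe = get_task_exe_file(task);
 *	if (exe) {
 *		audit_log_d_path(ab, " exe=", &exe->f_path);
 *		fput(exe);
 *	}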
1361cd81a917SMateusz Guzik  */
1362cd81a917SMateusz Guzik struct file *get_task_exe_file(struct task_struct *task)
1363cd81a917SMateusz Guzik {
1364cd81a917SMateusz Guzik 	struct file *exe_file = NULL;
1365cd81a917SMateusz Guzik 	struct mm_struct *mm;
1366cd81a917SMateusz Guzik
1367cd81a917SMateusz Guzik 	task_lock(task);
1368cd81a917SMateusz Guzik 	mm = task->mm;
1369cd81a917SMateusz Guzik 	if (mm) {
1370cd81a917SMateusz Guzik 		if (!(task->flags & PF_KTHREAD))
1371cd81a917SMateusz Guzik 			exe_file = get_mm_exe_file(mm);
1372cd81a917SMateusz Guzik 	}
1373cd81a917SMateusz Guzik 	task_unlock(task);
1374cd81a917SMateusz Guzik 	return exe_file;
1375cd81a917SMateusz Guzik }
1376cd81a917SMateusz Guzik
1377cd81a917SMateusz Guzik /**
13781da177e4SLinus Torvalds  * get_task_mm - acquire a reference to the task's mm
13791da177e4SLinus Torvalds  *
1380246bb0b1SOleg Nesterov  * Returns %NULL if the task has no mm. Checks whether PF_KTHREAD is set
13811da177e4SLinus Torvalds  * (meaning this kernel worker thread has transiently adopted a user mm
13821da177e4SLinus Torvalds  * with use_mm, e.g. to do its AIO); if it is not set, returns a reference
13831da177e4SLinus Torvalds  * to the mm after bumping up the use count. User must release the mm
13841da177e4SLinus Torvalds  * via mmput() after use. Typically used by /proc and ptrace.
13851da177e4SLinus Torvalds  */
13861da177e4SLinus Torvalds struct mm_struct *get_task_mm(struct task_struct *task)
13871da177e4SLinus Torvalds {
13881da177e4SLinus Torvalds 	struct mm_struct *mm;
13891da177e4SLinus Torvalds
13901da177e4SLinus Torvalds 	task_lock(task);
13911da177e4SLinus Torvalds 	mm = task->mm;
13921da177e4SLinus Torvalds 	if (mm) {
1393246bb0b1SOleg Nesterov 		if (task->flags & PF_KTHREAD)
13941da177e4SLinus Torvalds 			mm = NULL;
13951da177e4SLinus Torvalds 		else
13963fce371bSVegard Nossum 			mmget(mm);
13971da177e4SLinus Torvalds 	}
13981da177e4SLinus Torvalds 	task_unlock(task);
13991da177e4SLinus Torvalds 	return mm;
14001da177e4SLinus Torvalds }
14011da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(get_task_mm);
14021da177e4SLinus Torvalds
14038cdb878dSChristopher Yeoh struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
14048cdb878dSChristopher Yeoh {
14058cdb878dSChristopher Yeoh 	struct mm_struct *mm;
14068cdb878dSChristopher Yeoh 	int err;
14078cdb878dSChristopher Yeoh
1408f7cfd871SEric W. Biederman 	err = down_read_killable(&task->signal->exec_update_lock);
14098cdb878dSChristopher Yeoh 	if (err)
14108cdb878dSChristopher Yeoh 		return ERR_PTR(err);
14118cdb878dSChristopher Yeoh
14128cdb878dSChristopher Yeoh 	mm = get_task_mm(task);
14138cdb878dSChristopher Yeoh 	if (mm && mm != current->mm &&
14148cdb878dSChristopher Yeoh 	    !ptrace_may_access(task, mode)) {
14158cdb878dSChristopher Yeoh 		mmput(mm);
14168cdb878dSChristopher Yeoh 		mm = ERR_PTR(-EACCES);
14178cdb878dSChristopher Yeoh 	}
1418f7cfd871SEric W. Biederman 	up_read(&task->signal->exec_update_lock);
14198cdb878dSChristopher Yeoh
14208cdb878dSChristopher Yeoh 	return mm;
14218cdb878dSChristopher Yeoh }
14228cdb878dSChristopher Yeoh
142357b59c4aSOleg Nesterov static void complete_vfork_done(struct task_struct *tsk)
1424c415c3b4SOleg Nesterov {
1425d68b46feSOleg Nesterov 	struct completion *vfork;
1426c415c3b4SOleg Nesterov
1427d68b46feSOleg Nesterov 	task_lock(tsk);
1428d68b46feSOleg Nesterov 	vfork = tsk->vfork_done;
1429d68b46feSOleg Nesterov 	if (likely(vfork)) {
1430c415c3b4SOleg Nesterov 		tsk->vfork_done = NULL;
1431d68b46feSOleg Nesterov 		complete(vfork);
1432d68b46feSOleg Nesterov 	}
1433d68b46feSOleg Nesterov 	task_unlock(tsk);
1434d68b46feSOleg Nesterov }
1435d68b46feSOleg Nesterov
1436d68b46feSOleg Nesterov static int wait_for_vfork_done(struct task_struct *child,
1437d68b46feSOleg Nesterov 			       struct completion *vfork)
1438d68b46feSOleg Nesterov {
1439d68b46feSOleg Nesterov 	int killed;
1440d68b46feSOleg Nesterov
1441d68b46feSOleg Nesterov 	freezer_do_not_count();
144276f969e8SRoman Gushchin 	cgroup_enter_frozen();
1443d68b46feSOleg Nesterov 	killed = wait_for_completion_killable(vfork);
144476f969e8SRoman Gushchin 	cgroup_leave_frozen(false);
1445d68b46feSOleg Nesterov 	freezer_count();
1446d68b46feSOleg Nesterov
1447d68b46feSOleg Nesterov 	if (killed) {
1448d68b46feSOleg Nesterov 		task_lock(child);
1449d68b46feSOleg Nesterov 		child->vfork_done = NULL;
1450d68b46feSOleg Nesterov 		task_unlock(child);
1451d68b46feSOleg Nesterov 	}
1452d68b46feSOleg Nesterov
1453d68b46feSOleg Nesterov 	put_task_struct(child);
1454d68b46feSOleg Nesterov 	return killed;
1455c415c3b4SOleg Nesterov }
1456c415c3b4SOleg Nesterov
14571da177e4SLinus Torvalds /* Please note the differences between mmput and mm_release.
14581da177e4SLinus Torvalds  * mmput is called whenever we stop holding onto a mm_struct,
14591da177e4SLinus Torvalds  * whether on error or success.
14601da177e4SLinus Torvalds  *
14611da177e4SLinus Torvalds  * mm_release is called after a mm_struct has been removed
14621da177e4SLinus Torvalds  * from the current process.
14631da177e4SLinus Torvalds  *
14641da177e4SLinus Torvalds  * This difference is important for error handling, when we
14651da177e4SLinus Torvalds  * only half set up a mm_struct for a new process and need to restore
14661da177e4SLinus Torvalds  * the old one. Because we mmput the new mm_struct before
14671da177e4SLinus Torvalds  * restoring the old one...
14681da177e4SLinus Torvalds  * Eric Biederman 10 January 1998
14691da177e4SLinus Torvalds  */
14704610ba7aSThomas Gleixner static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
14711da177e4SLinus Torvalds {
14720326f5a9SSrikar Dronamraju 	uprobe_free_utask(tsk);
14730326f5a9SSrikar Dronamraju
14741da177e4SLinus Torvalds 	/* Get rid of any cached register state */
14751da177e4SLinus Torvalds 	deactivate_mm(tsk, mm);
14761da177e4SLinus Torvalds
1477fec1d011SRoland McGrath 	/*
1478735f2770SMichal Hocko 	 * Signal userspace if we're not exiting with a core dump
1479735f2770SMichal Hocko 	 * because we want to leave the value intact for debugging
1480735f2770SMichal Hocko 	 * purposes.
1481fec1d011SRoland McGrath 	 */
14829c8a8228SEric Dumazet 	if (tsk->clear_child_tid) {
148392307383SEric W. Biederman 		if (atomic_read(&mm->mm_users) > 1) {
14841da177e4SLinus Torvalds 			/*
14851da177e4SLinus Torvalds 			 * We don't check the error code - if userspace has
14861da177e4SLinus Torvalds 			 * not set up a proper pointer then tough luck.
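			 * (The put_user(0, ...) + FUTEX_WAKE pair below is
			 * what implements the CLONE_CHILD_CLEARTID contract:
			 * threading libraries such as glibc futex-wait on
			 * this tid word in pthread_join() until the kernel
			 * zeroes it and wakes the waiter here.)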
14871da177e4SLinus Torvalds 			 */
14889c8a8228SEric Dumazet 			put_user(0, tsk->clear_child_tid);
14892de0db99SDominik Brodowski 			do_futex(tsk->clear_child_tid, FUTEX_WAKE,
14902de0db99SDominik Brodowski 				 1, NULL, NULL, 0, 0);
14919c8a8228SEric Dumazet 		}
14929c8a8228SEric Dumazet 		tsk->clear_child_tid = NULL;
14931da177e4SLinus Torvalds 	}
1494f7505d64SKonstantin Khlebnikov
1495f7505d64SKonstantin Khlebnikov 	/*
1496f7505d64SKonstantin Khlebnikov 	 * All done, finally we can wake up the parent and return this mm to it.
1497f7505d64SKonstantin Khlebnikov 	 * Also kthread_stop() uses this completion for synchronization.
1498f7505d64SKonstantin Khlebnikov 	 */
1499f7505d64SKonstantin Khlebnikov 	if (tsk->vfork_done)
1500f7505d64SKonstantin Khlebnikov 		complete_vfork_done(tsk);
15011da177e4SLinus Torvalds }
15021da177e4SLinus Torvalds
15034610ba7aSThomas Gleixner void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
15044610ba7aSThomas Gleixner {
1505150d7158SThomas Gleixner 	futex_exit_release(tsk);
15064610ba7aSThomas Gleixner 	mm_release(tsk, mm);
15074610ba7aSThomas Gleixner }
15084610ba7aSThomas Gleixner
15094610ba7aSThomas Gleixner void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
15104610ba7aSThomas Gleixner {
1511150d7158SThomas Gleixner 	futex_exec_release(tsk);
15124610ba7aSThomas Gleixner 	mm_release(tsk, mm);
15134610ba7aSThomas Gleixner }
15144610ba7aSThomas Gleixner
151513585fa0SNadav Amit /**
151613585fa0SNadav Amit  * dup_mm() - duplicates an existing mm structure
151713585fa0SNadav Amit  * @tsk: the task_struct with which the new mm will be associated.
151813585fa0SNadav Amit  * @oldmm: the mm to duplicate.
151913585fa0SNadav Amit  *
152013585fa0SNadav Amit  * Allocates a new mm structure and duplicates the provided @oldmm structure
152113585fa0SNadav Amit  * content into it.
152213585fa0SNadav Amit  *
152313585fa0SNadav Amit  * Return: the duplicated mm or NULL on failure.
1524a0a7ec30SJANAK DESAI  */
152513585fa0SNadav Amit static struct mm_struct *dup_mm(struct task_struct *tsk,
152613585fa0SNadav Amit 				struct mm_struct *oldmm)
1527a0a7ec30SJANAK DESAI {
152813585fa0SNadav Amit 	struct mm_struct *mm;
1529a0a7ec30SJANAK DESAI 	int err;
1530a0a7ec30SJANAK DESAI
1531a0a7ec30SJANAK DESAI 	mm = allocate_mm();
1532a0a7ec30SJANAK DESAI 	if (!mm)
1533a0a7ec30SJANAK DESAI 		goto fail_nomem;
1534a0a7ec30SJANAK DESAI
1535a0a7ec30SJANAK DESAI 	memcpy(mm, oldmm, sizeof(*mm));
1536a0a7ec30SJANAK DESAI
1537bfedb589SEric W. Biederman 	if (!mm_init(mm, tsk, mm->user_ns))
1538a0a7ec30SJANAK DESAI 		goto fail_nomem;
1539a0a7ec30SJANAK DESAI
1540a0a7ec30SJANAK DESAI 	err = dup_mmap(mm, oldmm);
1541a0a7ec30SJANAK DESAI 	if (err)
1542a0a7ec30SJANAK DESAI 		goto free_pt;
1543a0a7ec30SJANAK DESAI
1544a0a7ec30SJANAK DESAI 	mm->hiwater_rss = get_mm_rss(mm);
1545a0a7ec30SJANAK DESAI 	mm->hiwater_vm = mm->total_vm;
1546a0a7ec30SJANAK DESAI
1547801460d0SHiroshi Shimamoto 	if (mm->binfmt && !try_module_get(mm->binfmt->module))
1548801460d0SHiroshi Shimamoto 		goto free_pt;
1549801460d0SHiroshi Shimamoto
1550a0a7ec30SJANAK DESAI 	return mm;
1551a0a7ec30SJANAK DESAI
1552a0a7ec30SJANAK DESAI free_pt:
1553801460d0SHiroshi Shimamoto 	/* don't put binfmt in mmput, we haven't got the module yet */
1554801460d0SHiroshi Shimamoto 	mm->binfmt = NULL;
1555c3f3ce04SAndrea Arcangeli 	mm_init_owner(mm, NULL);
1556a0a7ec30SJANAK DESAI 	mmput(mm);
1557a0a7ec30SJANAK DESAI
1558a0a7ec30SJANAK DESAI fail_nomem:
1559a0a7ec30SJANAK DESAI 	return NULL;
1560a0a7ec30SJANAK DESAI }
1561a0a7ec30SJANAK DESAI
15621da177e4SLinus Torvalds static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
15631da177e4SLinus Torvalds {
15641da177e4SLinus Torvalds 	struct mm_struct *mm, *oldmm;
15651da177e4SLinus Torvalds
15661da177e4SLinus Torvalds 	tsk->min_flt = tsk->maj_flt = 0;
15671da177e4SLinus Torvalds 	tsk->nvcsw = tsk->nivcsw = 0;
156817406b82SMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
156817406b82SMandeep Singh Baines 	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
1570a2e51445SDmitry Vyukov 	tsk->last_switch_time = 0;
157117406b82SMandeep Singh Baines #endif
15721da177e4SLinus Torvalds
15731da177e4SLinus Torvalds 	tsk->mm = NULL;
15741da177e4SLinus Torvalds 	tsk->active_mm = NULL;
15751da177e4SLinus Torvalds
15761da177e4SLinus Torvalds 	/*
15771da177e4SLinus Torvalds 	 * Are we cloning a kernel thread?
15781da177e4SLinus Torvalds 	 *
15791da177e4SLinus Torvalds 	 * We need to steal an active VM for that.
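	 *
	 * In short, the cases handled below are:
	 *	current->mm == NULL	- kernel thread, nothing to copy
	 *	clone_flags & CLONE_VM	- share: mmget(oldmm)
	 *	otherwise		- fork: dup_mm() builds a private copy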
15801da177e4SLinus Torvalds */ 15811da177e4SLinus Torvalds oldmm = current->mm; 15821da177e4SLinus Torvalds if (!oldmm) 15831da177e4SLinus Torvalds return 0; 15841da177e4SLinus Torvalds 1585615d6e87SDavidlohr Bueso /* initialize the new vmacache entries */ 1586615d6e87SDavidlohr Bueso vmacache_flush(tsk); 1587615d6e87SDavidlohr Bueso 15881da177e4SLinus Torvalds if (clone_flags & CLONE_VM) { 15893fce371bSVegard Nossum mmget(oldmm); 15901da177e4SLinus Torvalds mm = oldmm; 1591a6895399SRolf Eike Beer } else { 159213585fa0SNadav Amit mm = dup_mm(tsk, current->mm); 15931da177e4SLinus Torvalds if (!mm) 1594a6895399SRolf Eike Beer return -ENOMEM; 1595a6895399SRolf Eike Beer } 15961da177e4SLinus Torvalds 15971da177e4SLinus Torvalds tsk->mm = mm; 15981da177e4SLinus Torvalds tsk->active_mm = mm; 15991da177e4SLinus Torvalds return 0; 16001da177e4SLinus Torvalds } 16011da177e4SLinus Torvalds 1602a39bc516SAlexey Dobriyan static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) 16031da177e4SLinus Torvalds { 1604498052bbSAl Viro struct fs_struct *fs = current->fs; 16051da177e4SLinus Torvalds if (clone_flags & CLONE_FS) { 1606498052bbSAl Viro /* tsk->fs is already what we want */ 16072a4419b5SNick Piggin spin_lock(&fs->lock); 1608498052bbSAl Viro if (fs->in_exec) { 16092a4419b5SNick Piggin spin_unlock(&fs->lock); 1610498052bbSAl Viro return -EAGAIN; 1611498052bbSAl Viro } 1612498052bbSAl Viro fs->users++; 16132a4419b5SNick Piggin spin_unlock(&fs->lock); 16141da177e4SLinus Torvalds return 0; 16151da177e4SLinus Torvalds } 1616498052bbSAl Viro tsk->fs = copy_fs_struct(fs); 16171da177e4SLinus Torvalds if (!tsk->fs) 16181da177e4SLinus Torvalds return -ENOMEM; 16191da177e4SLinus Torvalds return 0; 16201da177e4SLinus Torvalds } 16211da177e4SLinus Torvalds 1622a016f338SJANAK DESAI static int copy_files(unsigned long clone_flags, struct task_struct *tsk) 1623a016f338SJANAK DESAI { 1624a016f338SJANAK DESAI struct files_struct *oldf, *newf; 1625a016f338SJANAK DESAI int error = 0; 1626a016f338SJANAK DESAI 1627a016f338SJANAK DESAI /* 1628a016f338SJANAK DESAI * A background process may not have any files ... 
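	 * (If it does, CLONE_FILES shares the descriptor table by bumping
	 * its count; otherwise dup_fd() below gives the child a private
	 * copy of the table.)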
1629a016f338SJANAK DESAI 	 */
1630a016f338SJANAK DESAI 	oldf = current->files;
1631a016f338SJANAK DESAI 	if (!oldf)
1632a016f338SJANAK DESAI 		goto out;
1633a016f338SJANAK DESAI
1634a016f338SJANAK DESAI 	if (clone_flags & CLONE_FILES) {
1635a016f338SJANAK DESAI 		atomic_inc(&oldf->count);
1636a016f338SJANAK DESAI 		goto out;
1637a016f338SJANAK DESAI 	}
1638a016f338SJANAK DESAI
163960997c3dSChristian Brauner 	newf = dup_fd(oldf, NR_OPEN_MAX, &error);
1640a016f338SJANAK DESAI 	if (!newf)
1641a016f338SJANAK DESAI 		goto out;
1642a016f338SJANAK DESAI
1643a016f338SJANAK DESAI 	tsk->files = newf;
1644a016f338SJANAK DESAI 	error = 0;
1645a016f338SJANAK DESAI out:
1646a016f338SJANAK DESAI 	return error;
1647a016f338SJANAK DESAI }
1648a016f338SJANAK DESAI
1649a39bc516SAlexey Dobriyan static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
16501da177e4SLinus Torvalds {
16511da177e4SLinus Torvalds 	struct sighand_struct *sig;
16521da177e4SLinus Torvalds
165360348802SZhaolei 	if (clone_flags & CLONE_SIGHAND) {
1654d036bda7SElena Reshetova 		refcount_inc(&current->sighand->count);
16551da177e4SLinus Torvalds 		return 0;
16561da177e4SLinus Torvalds 	}
16571da177e4SLinus Torvalds 	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
16580c282b06SMadhuparna Bhowmik 	RCU_INIT_POINTER(tsk->sighand, sig);
16591da177e4SLinus Torvalds 	if (!sig)
16601da177e4SLinus Torvalds 		return -ENOMEM;
16619d7fb042SPeter Zijlstra
1662d036bda7SElena Reshetova 	refcount_set(&sig->count, 1);
166306e62a46SJann Horn 	spin_lock_irq(&current->sighand->siglock);
16641da177e4SLinus Torvalds 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
166506e62a46SJann Horn 	spin_unlock_irq(&current->sighand->siglock);
1666b612e5dfSChristian Brauner
1667b612e5dfSChristian Brauner 	/* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
1668b612e5dfSChristian Brauner 	if (clone_flags & CLONE_CLEAR_SIGHAND)
1669b612e5dfSChristian Brauner 		flush_signal_handlers(tsk, 0);
1670b612e5dfSChristian Brauner
16711da177e4SLinus Torvalds 	return 0;
16721da177e4SLinus Torvalds }
16731da177e4SLinus Torvalds
1674a7e5328aSOleg Nesterov void __cleanup_sighand(struct sighand_struct *sighand)
1675c81addc9SOleg Nesterov {
1676d036bda7SElena Reshetova 	if (refcount_dec_and_test(&sighand->count)) {
1677d80e731eSOleg Nesterov 		signalfd_cleanup(sighand);
1678392809b2SOleg Nesterov 		/*
16795f0d5a3aSPaul E. McKenney 		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
1680392809b2SOleg Nesterov 		 * without an RCU grace period, see __lock_task_sighand().
1681392809b2SOleg Nesterov 		 */
1682c81addc9SOleg Nesterov 		kmem_cache_free(sighand_cachep, sighand);
1683c81addc9SOleg Nesterov 	}
1684d80e731eSOleg Nesterov }
1685c81addc9SOleg Nesterov
1686f06febc9SFrank Mayhar /*
1687f06febc9SFrank Mayhar  * Initialize POSIX timer handling for a thread group.
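 *
 * The group-wide expiry is seeded from the RLIMIT_CPU soft limit below,
 * so e.g. rlim_cur == 10 arms a process-wide CPU-time timer at ten
 * seconds (an illustrative reading of posix_cputimers_group_init()).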
1688f06febc9SFrank Mayhar */ 1689f06febc9SFrank Mayhar static void posix_cpu_timers_init_group(struct signal_struct *sig) 1690f06febc9SFrank Mayhar { 16912b69942fSThomas Gleixner struct posix_cputimers *pct = &sig->posix_cputimers; 169278d7d407SJiri Slaby unsigned long cpu_limit; 169378d7d407SJiri Slaby 1694316c1608SJason Low cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); 16953a245c0fSThomas Gleixner posix_cputimers_group_init(pct, cpu_limit); 16966279a751SOleg Nesterov } 16976279a751SOleg Nesterov 1698a39bc516SAlexey Dobriyan static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) 16991da177e4SLinus Torvalds { 17001da177e4SLinus Torvalds struct signal_struct *sig; 17011da177e4SLinus Torvalds 17024ab6c083SOleg Nesterov if (clone_flags & CLONE_THREAD) 1703490dea45SPeter Zijlstra return 0; 17046279a751SOleg Nesterov 1705a56704efSVeaceslav Falico sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); 17061da177e4SLinus Torvalds tsk->signal = sig; 17071da177e4SLinus Torvalds if (!sig) 17081da177e4SLinus Torvalds return -ENOMEM; 17091da177e4SLinus Torvalds 1710b3ac022cSOleg Nesterov sig->nr_threads = 1; 17111da177e4SLinus Torvalds atomic_set(&sig->live, 1); 171260d4de3fSElena Reshetova refcount_set(&sig->sigcnt, 1); 17130c740d0aSOleg Nesterov 17140c740d0aSOleg Nesterov /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ 17150c740d0aSOleg Nesterov sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); 17160c740d0aSOleg Nesterov tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); 17170c740d0aSOleg Nesterov 17181da177e4SLinus Torvalds init_waitqueue_head(&sig->wait_chldexit); 1719db51aeccSOleg Nesterov sig->curr_target = tsk; 17201da177e4SLinus Torvalds init_sigpending(&sig->shared_pending); 1721c3ad2c3bSEric W. Biederman INIT_HLIST_HEAD(&sig->multiprocess); 1722e78c3496SRik van Riel seqlock_init(&sig->stats_lock); 17239d7fb042SPeter Zijlstra prev_cputime_init(&sig->prev_cputime); 17241da177e4SLinus Torvalds 1725baa73d9eSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 1726b18b6a9cSNicolas Pitre INIT_LIST_HEAD(&sig->posix_timers); 1727c9cb2e3dSThomas Gleixner hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 17281da177e4SLinus Torvalds sig->real_timer.function = it_real_fn; 1729baa73d9eSNicolas Pitre #endif 17301da177e4SLinus Torvalds 17311da177e4SLinus Torvalds task_lock(current->group_leader); 17321da177e4SLinus Torvalds memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); 17331da177e4SLinus Torvalds task_unlock(current->group_leader); 17341da177e4SLinus Torvalds 17356279a751SOleg Nesterov posix_cpu_timers_init_group(sig); 17366279a751SOleg Nesterov 1737522ed776SMiloslav Trmac tty_audit_fork(sig); 17385091faa4SMike Galbraith sched_autogroup_fork(sig); 1739522ed776SMiloslav Trmac 1740a63d83f4SDavid Rientjes sig->oom_score_adj = current->signal->oom_score_adj; 1741dabb16f6SMandeep Singh Baines sig->oom_score_adj_min = current->signal->oom_score_adj_min; 174228b83c51SKOSAKI Motohiro 17439b1bf12dSKOSAKI Motohiro mutex_init(&sig->cred_guard_mutex); 1744f7cfd871SEric W. 
Biederman 	init_rwsem(&sig->exec_update_lock);
17459b1bf12dSKOSAKI Motohiro
17461da177e4SLinus Torvalds 	return 0;
17471da177e4SLinus Torvalds }
17481da177e4SLinus Torvalds
1749dbd95212SKees Cook static void copy_seccomp(struct task_struct *p)
1750dbd95212SKees Cook {
1751dbd95212SKees Cook #ifdef CONFIG_SECCOMP
1752dbd95212SKees Cook 	/*
1753dbd95212SKees Cook 	 * Must be called with sighand->lock held, which is common to
1754dbd95212SKees Cook 	 * all threads in the group. Holding cred_guard_mutex is not
1755dbd95212SKees Cook 	 * needed because this new task is not yet running and cannot
1756dbd95212SKees Cook 	 * be racing exec.
1757dbd95212SKees Cook 	 */
175869f6a34bSGuenter Roeck 	assert_spin_locked(&current->sighand->siglock);
1759dbd95212SKees Cook
1760dbd95212SKees Cook 	/* Ref-count the new filter user, and assign it. */
1761dbd95212SKees Cook 	get_seccomp_filter(current);
1762dbd95212SKees Cook 	p->seccomp = current->seccomp;
1763dbd95212SKees Cook
1764dbd95212SKees Cook 	/*
1765dbd95212SKees Cook 	 * Explicitly enable no_new_privs here in case it got set
1766dbd95212SKees Cook 	 * between the task_struct being duplicated and holding the
1767dbd95212SKees Cook 	 * sighand lock. The seccomp state and nnp must be in sync.
1768dbd95212SKees Cook 	 */
1769dbd95212SKees Cook 	if (task_no_new_privs(current))
1770dbd95212SKees Cook 		task_set_no_new_privs(p);
1771dbd95212SKees Cook
1772dbd95212SKees Cook 	/*
1773dbd95212SKees Cook 	 * If the parent gained a seccomp mode after copying thread
1774dbd95212SKees Cook 	 * flags but before we took the sighand lock, we have
1775dbd95212SKees Cook 	 * to manually enable the seccomp thread flag here.
1776dbd95212SKees Cook 	 */
1777dbd95212SKees Cook 	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
177823d67a54SGabriel Krisman Bertazi 		set_task_syscall_work(p, SECCOMP);
1779dbd95212SKees Cook #endif
1780dbd95212SKees Cook }
1781dbd95212SKees Cook
178217da2bd9SHeiko Carstens SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
17831da177e4SLinus Torvalds {
17841da177e4SLinus Torvalds 	current->clear_child_tid = tidptr;
17851da177e4SLinus Torvalds
1786b488893aSPavel Emelyanov 	return task_pid_vnr(current);
17871da177e4SLinus Torvalds }
17881da177e4SLinus Torvalds
1789a39bc516SAlexey Dobriyan static void rt_mutex_init_task(struct task_struct *p)
179023f78d4aSIngo Molnar {
17911d615482SThomas Gleixner 	raw_spin_lock_init(&p->pi_lock);
1792e29e175bSZilvinas Valinskas #ifdef CONFIG_RT_MUTEXES
1793a23ba907SDavidlohr Bueso 	p->pi_waiters = RB_ROOT_CACHED;
1794e96a7705SXunlei Pang 	p->pi_top_task = NULL;
179523f78d4aSIngo Molnar 	p->pi_blocked_on = NULL;
179623f78d4aSIngo Molnar #endif
179723f78d4aSIngo Molnar }
179823f78d4aSIngo Molnar
17992c470475SEric W. Biederman static inline void init_task_pid_links(struct task_struct *task)
18002c470475SEric W. Biederman {
18012c470475SEric W. Biederman 	enum pid_type type;
18022c470475SEric W. Biederman
180396e1e984SAlexander Guril 	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
18042c470475SEric W. Biederman 		INIT_HLIST_NODE(&task->pid_links[type]);
18052c470475SEric W. Biederman }
18062c470475SEric W. Biederman
180781907739SOleg Nesterov static inline void
180881907739SOleg Nesterov init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
180981907739SOleg Nesterov {
18102c470475SEric W. Biederman 	if (type == PIDTYPE_PID)
18112c470475SEric W. Biederman 		task->thread_pid = pid;
18122c470475SEric W. Biederman 	else
18132c470475SEric W. Biederman 		task->signal->pids[type] = pid;
181481907739SOleg Nesterov }
181581907739SOleg Nesterov
18166bfbaa51SIngo Molnar static inline void rcu_copy_process(struct task_struct *p)
18176bfbaa51SIngo Molnar {
18186bfbaa51SIngo Molnar #ifdef CONFIG_PREEMPT_RCU
18196bfbaa51SIngo Molnar 	p->rcu_read_lock_nesting = 0;
18206bfbaa51SIngo Molnar 	p->rcu_read_unlock_special.s = 0;
18216bfbaa51SIngo Molnar 	p->rcu_blocked_node = NULL;
18226bfbaa51SIngo Molnar 	INIT_LIST_HEAD(&p->rcu_node_entry);
18236bfbaa51SIngo Molnar #endif /* #ifdef CONFIG_PREEMPT_RCU */
18246bfbaa51SIngo Molnar #ifdef CONFIG_TASKS_RCU
18256bfbaa51SIngo Molnar 	p->rcu_tasks_holdout = false;
18266bfbaa51SIngo Molnar 	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
18276bfbaa51SIngo Molnar 	p->rcu_tasks_idle_cpu = -1;
18286bfbaa51SIngo Molnar #endif /* #ifdef CONFIG_TASKS_RCU */
1829d5f177d3SPaul E. McKenney #ifdef CONFIG_TASKS_TRACE_RCU
1830d5f177d3SPaul E. McKenney 	p->trc_reader_nesting = 0;
1831276c4104SPaul E. McKenney 	p->trc_reader_special.s = 0;
1832d5f177d3SPaul E. McKenney 	INIT_LIST_HEAD(&p->trc_holdout_list);
1833434c9eefSPaul E. McKenney 	INIT_LIST_HEAD(&p->trc_blkd_node);
1834d5f177d3SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
18356bfbaa51SIngo Molnar }
18366bfbaa51SIngo Molnar
18373695eae5SChristian Brauner struct pid *pidfd_pid(const struct file *file)
18383695eae5SChristian Brauner {
18393695eae5SChristian Brauner 	if (file->f_op == &pidfd_fops)
18403695eae5SChristian Brauner 		return file->private_data;
18413695eae5SChristian Brauner
18423695eae5SChristian Brauner 	return ERR_PTR(-EBADF);
18433695eae5SChristian Brauner }
18443695eae5SChristian Brauner
1845b3e58382SChristian Brauner static int pidfd_release(struct inode *inode, struct file *file)
1846b3e58382SChristian Brauner {
1847b3e58382SChristian Brauner 	struct pid *pid = file->private_data;
1848b3e58382SChristian Brauner
1849b3e58382SChristian Brauner 	file->private_data = NULL;
1850b3e58382SChristian Brauner 	put_pid(pid);
1851b3e58382SChristian Brauner 	return 0;
1852b3e58382SChristian Brauner }
1853b3e58382SChristian Brauner
1854b3e58382SChristian Brauner #ifdef CONFIG_PROC_FS
185515d42eb2SChristian Kellner /**
185615d42eb2SChristian Kellner  * pidfd_show_fdinfo - print information about a pidfd
185715d42eb2SChristian Kellner  * @m: proc fdinfo file
185815d42eb2SChristian Kellner  * @f: file referencing a pidfd
185915d42eb2SChristian Kellner  *
186015d42eb2SChristian Kellner  * Pid:
186115d42eb2SChristian Kellner  * This function will print the pid that a given pidfd refers to in the
186215d42eb2SChristian Kellner  * pid namespace of the procfs instance.
186315d42eb2SChristian Kellner  * If the pid namespace of the process is not a descendant of the pid
186415d42eb2SChristian Kellner  * namespace of the procfs instance, 0 will be shown as its pid. This is
186515d42eb2SChristian Kellner  * similar to calling getppid() on a process whose parent is outside of
186615d42eb2SChristian Kellner  * its pid namespace.
186715d42eb2SChristian Kellner  *
186815d42eb2SChristian Kellner  * NSpid:
186915d42eb2SChristian Kellner  * If pid namespaces are supported then this function will also print
187015d42eb2SChristian Kellner  * the pid that a given pidfd refers to for all descendant pid namespaces,
187115d42eb2SChristian Kellner  * starting from the current pid namespace of the instance, i.e. the
187215d42eb2SChristian Kellner  * Pid field and the first entry in the NSpid field will be identical.
187315d42eb2SChristian Kellner  * If the pid namespace of the process is not a descendant of the pid
187415d42eb2SChristian Kellner  * namespace of the procfs instance, 0 will be shown as its first NSpid
187515d42eb2SChristian Kellner  * entry and no others will be shown.
187615d42eb2SChristian Kellner  * Note that this differs from the Pid and NSpid fields in
187715d42eb2SChristian Kellner  * /proc/<pid>/status where Pid and NSpid are always shown relative to
187815d42eb2SChristian Kellner  * the pid namespace of the procfs instance. The difference becomes
187915d42eb2SChristian Kellner  * obvious when sending around a pidfd between pid namespaces from a
1880a8ca6b13SXiaofeng Cao  * different branch of the tree, i.e. where no ancestral relation is
188115d42eb2SChristian Kellner  * present between the pid namespaces:
188215d42eb2SChristian Kellner  * - create two new pid namespaces ns1 and ns2 in the initial pid
188315d42eb2SChristian Kellner  *   namespace (also take care to create new mount namespaces in the
188415d42eb2SChristian Kellner  *   new pid namespace and mount procfs)
188515d42eb2SChristian Kellner  * - create a process with a pidfd in ns1
188615d42eb2SChristian Kellner  * - send pidfd from ns1 to ns2
188715d42eb2SChristian Kellner  * - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid
188815d42eb2SChristian Kellner  *   have exactly one entry, which is 0
188915d42eb2SChristian Kellner  */
1890b3e58382SChristian Brauner static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
1891b3e58382SChristian Brauner {
1892b3e58382SChristian Brauner 	struct pid *pid = f->private_data;
18933d6d8da4SChristian Brauner 	struct pid_namespace *ns;
18943d6d8da4SChristian Brauner 	pid_t nr = -1;
1895b3e58382SChristian Brauner
18963d6d8da4SChristian Brauner 	if (likely(pid_has_task(pid, PIDTYPE_PID))) {
18979d78edeaSAlexey Gladkov 		ns = proc_pid_ns(file_inode(m->file)->i_sb);
18983d6d8da4SChristian Brauner 		nr = pid_nr_ns(pid, ns);
18993d6d8da4SChristian Brauner 	}
19003d6d8da4SChristian Brauner
19013d6d8da4SChristian Brauner 	seq_put_decimal_ll(m, "Pid:\t", nr);
190215d42eb2SChristian Kellner
190315d42eb2SChristian Kellner #ifdef CONFIG_PID_NS
19043d6d8da4SChristian Brauner 	seq_put_decimal_ll(m, "\nNSpid:\t", nr);
19053d6d8da4SChristian Brauner 	if (nr > 0) {
190615d42eb2SChristian Kellner 		int i;
190715d42eb2SChristian Kellner
190815d42eb2SChristian Kellner 		/* If nr is non-zero it means that 'pid' is valid and that
190915d42eb2SChristian Kellner 		 * ns, i.e. the pid namespace associated with the procfs
191015d42eb2SChristian Kellner 		 * instance, is in the pid namespace hierarchy of pid.
191115d42eb2SChristian Kellner 		 * Start at one below the already printed level.
191215d42eb2SChristian Kellner 		 */
191315d42eb2SChristian Kellner 		for (i = ns->level + 1; i <= pid->level; i++)
19143d6d8da4SChristian Brauner 			seq_put_decimal_ll(m, "\t", pid->numbers[i].nr);
191515d42eb2SChristian Kellner 	}
191615d42eb2SChristian Kellner #endif
1917b3e58382SChristian Brauner 	seq_putc(m, '\n');
1918b3e58382SChristian Brauner }
1919b3e58382SChristian Brauner #endif
1920b3e58382SChristian Brauner
1921b53b0b9dSJoel Fernandes (Google) /*
1922b53b0b9dSJoel Fernandes (Google)  * Poll support for process exit notification.
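 *
 * A userspace sketch of the intended pattern (error handling omitted;
 * assumes a pidfd obtained via clone(CLONE_PIDFD) or pidfd_open(2)).
 * poll() blocks until the whole thread group has exited, after which
 * the exit status can be reaped:
 *
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	waitid(P_PIDFD, pidfd, &info, WEXITED);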
1923b53b0b9dSJoel Fernandes (Google) */ 19249e77716aSLuc Van Oostenryck static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts) 1925b53b0b9dSJoel Fernandes (Google) { 1926b53b0b9dSJoel Fernandes (Google) struct pid *pid = file->private_data; 19279e77716aSLuc Van Oostenryck __poll_t poll_flags = 0; 1928b53b0b9dSJoel Fernandes (Google) 1929b53b0b9dSJoel Fernandes (Google) poll_wait(file, &pid->wait_pidfd, pts); 1930b53b0b9dSJoel Fernandes (Google) 1931b53b0b9dSJoel Fernandes (Google) /* 1932b53b0b9dSJoel Fernandes (Google) * Inform pollers only when the whole thread group exits. 1933b53b0b9dSJoel Fernandes (Google) * If the thread group leader exits before all other threads in the 1934b53b0b9dSJoel Fernandes (Google) * group, then poll(2) should block, similar to the wait(2) family. 1935b53b0b9dSJoel Fernandes (Google) */ 193638fd525aSEric W. Biederman if (thread_group_exited(pid)) 19379e77716aSLuc Van Oostenryck poll_flags = EPOLLIN | EPOLLRDNORM; 1938b53b0b9dSJoel Fernandes (Google) 1939b53b0b9dSJoel Fernandes (Google) return poll_flags; 1940b53b0b9dSJoel Fernandes (Google) } 1941b53b0b9dSJoel Fernandes (Google) 1942b3e58382SChristian Brauner const struct file_operations pidfd_fops = { 1943b3e58382SChristian Brauner .release = pidfd_release, 1944b53b0b9dSJoel Fernandes (Google) .poll = pidfd_poll, 1945b3e58382SChristian Brauner #ifdef CONFIG_PROC_FS 1946b3e58382SChristian Brauner .show_fdinfo = pidfd_show_fdinfo, 1947b3e58382SChristian Brauner #endif 1948b3e58382SChristian Brauner }; 1949b3e58382SChristian Brauner 1950c3f3ce04SAndrea Arcangeli static void __delayed_free_task(struct rcu_head *rhp) 1951c3f3ce04SAndrea Arcangeli { 1952c3f3ce04SAndrea Arcangeli struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 1953c3f3ce04SAndrea Arcangeli 1954c3f3ce04SAndrea Arcangeli free_task(tsk); 1955c3f3ce04SAndrea Arcangeli } 1956c3f3ce04SAndrea Arcangeli 1957c3f3ce04SAndrea Arcangeli static __always_inline void delayed_free_task(struct task_struct *tsk) 1958c3f3ce04SAndrea Arcangeli { 1959c3f3ce04SAndrea Arcangeli if (IS_ENABLED(CONFIG_MEMCG)) 1960c3f3ce04SAndrea Arcangeli call_rcu(&tsk->rcu, __delayed_free_task); 1961c3f3ce04SAndrea Arcangeli else 1962c3f3ce04SAndrea Arcangeli free_task(tsk); 1963c3f3ce04SAndrea Arcangeli } 1964c3f3ce04SAndrea Arcangeli 196567197a4fSSuren Baghdasaryan static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) 196667197a4fSSuren Baghdasaryan { 196767197a4fSSuren Baghdasaryan /* Skip if kernel thread */ 196867197a4fSSuren Baghdasaryan if (!tsk->mm) 196967197a4fSSuren Baghdasaryan return; 197067197a4fSSuren Baghdasaryan 197167197a4fSSuren Baghdasaryan /* Skip if spawning a thread or using vfork */ 197267197a4fSSuren Baghdasaryan if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM) 197367197a4fSSuren Baghdasaryan return; 197467197a4fSSuren Baghdasaryan 197567197a4fSSuren Baghdasaryan /* We need to synchronize with __set_oom_adj */ 197667197a4fSSuren Baghdasaryan mutex_lock(&oom_adj_mutex); 197767197a4fSSuren Baghdasaryan set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); 197867197a4fSSuren Baghdasaryan /* Update the values in case they were changed after copy_signal */ 197967197a4fSSuren Baghdasaryan tsk->signal->oom_score_adj = current->signal->oom_score_adj; 198067197a4fSSuren Baghdasaryan tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min; 198167197a4fSSuren Baghdasaryan mutex_unlock(&oom_adj_mutex); 198267197a4fSSuren Baghdasaryan } 198367197a4fSSuren Baghdasaryan 198479257534SDaniel 
Bristot de Oliveira #ifdef CONFIG_RV 198579257534SDaniel Bristot de Oliveira static void rv_task_fork(struct task_struct *p) 198679257534SDaniel Bristot de Oliveira { 198779257534SDaniel Bristot de Oliveira int i; 198879257534SDaniel Bristot de Oliveira 198979257534SDaniel Bristot de Oliveira for (i = 0; i < RV_PER_TASK_MONITORS; i++) 199079257534SDaniel Bristot de Oliveira p->rv[i].da_mon.monitoring = false; 199179257534SDaniel Bristot de Oliveira } 199279257534SDaniel Bristot de Oliveira #else 199379257534SDaniel Bristot de Oliveira #define rv_task_fork(p) do {} while (0) 199479257534SDaniel Bristot de Oliveira #endif 199579257534SDaniel Bristot de Oliveira 1996f06febc9SFrank Mayhar /* 19971da177e4SLinus Torvalds * This creates a new process as a copy of the old one, 19981da177e4SLinus Torvalds * but does not actually start it yet. 19991da177e4SLinus Torvalds * 20001da177e4SLinus Torvalds * It copies the registers, and all the appropriate 20011da177e4SLinus Torvalds * parts of the process environment (as per the clone 20021da177e4SLinus Torvalds * flags). The actual kick-off is left to the caller. 20031da177e4SLinus Torvalds */ 20040766f788SEmese Revfy static __latent_entropy struct task_struct *copy_process( 200509a05394SRoland McGrath struct pid *pid, 20063033f14aSJosh Triplett int trace, 20077f192e3cSChristian Brauner int node, 20087f192e3cSChristian Brauner struct kernel_clone_args *args) 20091da177e4SLinus Torvalds { 2010b3e58382SChristian Brauner int pidfd = -1, retval; 2011a24efe62SMariusz Kozlowski struct task_struct *p; 2012c3ad2c3bSEric W. Biederman struct multiprocess_signals delayed; 20136fd2fe49SAl Viro struct file *pidfile = NULL; 2014c5febea0SEric W. Biederman const u64 clone_flags = args->flags; 2015769071acSAndrei Vagin struct nsproxy *nsp = current->nsproxy; 20161da177e4SLinus Torvalds 2017667b6094SMarcos Paulo de Souza /* 2018667b6094SMarcos Paulo de Souza * Don't allow sharing the root directory with processes in a different 2019667b6094SMarcos Paulo de Souza * namespace 2020667b6094SMarcos Paulo de Souza */ 20211da177e4SLinus Torvalds if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) 20221da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 20231da177e4SLinus Torvalds 2024e66eded8SEric W. Biederman if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) 2025e66eded8SEric W. Biederman return ERR_PTR(-EINVAL); 2026e66eded8SEric W. Biederman 20271da177e4SLinus Torvalds /* 20281da177e4SLinus Torvalds * Thread groups must share signals as well, and detached threads 20291da177e4SLinus Torvalds * can only be started up within the thread group. 20301da177e4SLinus Torvalds */ 20311da177e4SLinus Torvalds if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) 20321da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 20331da177e4SLinus Torvalds 20341da177e4SLinus Torvalds /* 20351da177e4SLinus Torvalds * Shared signal handlers imply shared VM. By way of the above, 20361da177e4SLinus Torvalds * thread groups also imply shared VM. Blocking this case allows 20371da177e4SLinus Torvalds * for various simplifications in other code. 20381da177e4SLinus Torvalds */ 20391da177e4SLinus Torvalds if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) 20401da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 20411da177e4SLinus Torvalds 2042123be07bSSukadev Bhattiprolu /* 2043123be07bSSukadev Bhattiprolu * Siblings of global init remain as zombies on exit since they are 2044123be07bSSukadev Bhattiprolu * not reaped by their parent (swapper). 
To solve this and to avoid
2045123be07bSSukadev Bhattiprolu 	 * multi-rooted process trees, prevent global and container-inits
2046123be07bSSukadev Bhattiprolu 	 * from creating siblings.
2047123be07bSSukadev Bhattiprolu 	 */
2048123be07bSSukadev Bhattiprolu 	if ((clone_flags & CLONE_PARENT) &&
2049123be07bSSukadev Bhattiprolu 				current->signal->flags & SIGNAL_UNKILLABLE)
2050123be07bSSukadev Bhattiprolu 		return ERR_PTR(-EINVAL);
2051123be07bSSukadev Bhattiprolu
20528382fcacSEric W. Biederman 	/*
205340a0d32dSOleg Nesterov 	 * If the new process will be in a different pid or user namespace,
2054faf00da5SEric W. Biederman 	 * do not allow it to share a thread group with the forking task.
20558382fcacSEric W. Biederman 	 */
2056faf00da5SEric W. Biederman 	if (clone_flags & CLONE_THREAD) {
205740a0d32dSOleg Nesterov 		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
2058769071acSAndrei Vagin 		    (task_active_pid_ns(current) != nsp->pid_ns_for_children))
2059769071acSAndrei Vagin 			return ERR_PTR(-EINVAL);
2060769071acSAndrei Vagin 	}
2061769071acSAndrei Vagin
2062769071acSAndrei Vagin 	/*
2063769071acSAndrei Vagin 	 * If the new process will be in a different time namespace,
2064769071acSAndrei Vagin 	 * do not allow it to share VM or a thread group with the forking task.
2065133e2d3eSAndrei Vagin 	 *
2066133e2d3eSAndrei Vagin 	 * On vfork, the child process enters the target time namespace only
2067133e2d3eSAndrei Vagin 	 * after exec.
2068769071acSAndrei Vagin 	 */
2069133e2d3eSAndrei Vagin 	if ((clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM) {
2070769071acSAndrei Vagin 		if (nsp->time_ns != nsp->time_ns_for_children)
20718382fcacSEric W. Biederman 			return ERR_PTR(-EINVAL);
207240a0d32dSOleg Nesterov 	}
20738382fcacSEric W. Biederman
2074b3e58382SChristian Brauner 	if (clone_flags & CLONE_PIDFD) {
2075b3e58382SChristian Brauner 		/*
2076b3e58382SChristian Brauner 		 * - CLONE_DETACHED is blocked so that we can potentially
2077b3e58382SChristian Brauner 		 *   reuse it later for CLONE_PIDFD.
2078b3e58382SChristian Brauner 		 * - CLONE_THREAD is blocked until someone really needs it.
2079b3e58382SChristian Brauner 		 */
20807f192e3cSChristian Brauner 		if (clone_flags & (CLONE_DETACHED | CLONE_THREAD))
2081b3e58382SChristian Brauner 			return ERR_PTR(-EINVAL);
2082b3e58382SChristian Brauner 	}
2083b3e58382SChristian Brauner
2084c3ad2c3bSEric W. Biederman 	/*
2085c3ad2c3bSEric W. Biederman 	 * Force any signals received before this point to be delivered
2086c3ad2c3bSEric W. Biederman 	 * before the fork happens. Collect up signals sent to multiple
2087c3ad2c3bSEric W. Biederman 	 * processes that happen during the fork and delay them so that
2088c3ad2c3bSEric W. Biederman 	 * they appear to happen after the fork.
2089c3ad2c3bSEric W. Biederman 	 */
2090c3ad2c3bSEric W. Biederman 	sigemptyset(&delayed.signal);
2091c3ad2c3bSEric W. Biederman 	INIT_HLIST_NODE(&delayed.node);
2092c3ad2c3bSEric W. Biederman
2093c3ad2c3bSEric W. Biederman 	spin_lock_irq(&current->sighand->siglock);
2094c3ad2c3bSEric W. Biederman 	if (!(clone_flags & CLONE_THREAD))
2095c3ad2c3bSEric W. Biederman 		hlist_add_head(&delayed.node, &current->signal->multiprocess);
2096c3ad2c3bSEric W. Biederman 	recalc_sigpending();
2097c3ad2c3bSEric W. Biederman 	spin_unlock_irq(&current->sighand->siglock);
2098c3ad2c3bSEric W. Biederman 	retval = -ERESTARTNOINTR;
209966ae0d1eSJens Axboe 	if (task_sigpending(current))
2100c3ad2c3bSEric W. Biederman 		goto fork_out;
2101c3ad2c3bSEric W.
Biederman 21021da177e4SLinus Torvalds retval = -ENOMEM; 2103725fc629SAndi Kleen p = dup_task_struct(current, node); 21041da177e4SLinus Torvalds if (!p) 21051da177e4SLinus Torvalds goto fork_out; 2106753550ebSEric W. Biederman p->flags &= ~PF_KTHREAD; 2107753550ebSEric W. Biederman if (args->kthread) 2108753550ebSEric W. Biederman p->flags |= PF_KTHREAD; 2109b16b3855SJens Axboe if (args->io_thread) { 2110b16b3855SJens Axboe /* 2111b16b3855SJens Axboe * Mark us an IO worker, and block any signal that isn't 2112b16b3855SJens Axboe * fatal or STOP 2113b16b3855SJens Axboe */ 2114cc440e87SJens Axboe p->flags |= PF_IO_WORKER; 2115b16b3855SJens Axboe siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2116b16b3855SJens Axboe } 21171da177e4SLinus Torvalds 21187f192e3cSChristian Brauner p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL; 21194d6501dcSVegard Nossum /* 21204d6501dcSVegard Nossum * Clear TID on mm_release()? 21214d6501dcSVegard Nossum */ 21227f192e3cSChristian Brauner p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL; 21234d6501dcSVegard Nossum 2124f7e8b616SSteven Rostedt ftrace_graph_init_task(p); 2125f7e8b616SSteven Rostedt 2126bea493a0SPeter Zijlstra rt_mutex_init_task(p); 2127bea493a0SPeter Zijlstra 2128a21ee605SPeter Zijlstra lockdep_assert_irqs_enabled(); 2129d12c1a37SIngo Molnar #ifdef CONFIG_PROVE_LOCKING 2130de30a2b3SIngo Molnar DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 2131de30a2b3SIngo Molnar #endif 21328f2f9c4dSEric W. Biederman retval = copy_creds(p, clone_flags); 21338f2f9c4dSEric W. Biederman if (retval < 0) 21348f2f9c4dSEric W. Biederman goto bad_fork_free; 21358f2f9c4dSEric W. Biederman 21361da177e4SLinus Torvalds retval = -EAGAIN; 213721d1c5e3SAlexey Gladkov if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) { 2138b57922b6SEric Paris if (p->real_cred->user != INIT_USER && 2139b57922b6SEric Paris !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) 21408f2f9c4dSEric W. Biederman goto bad_fork_cleanup_count; 21411da177e4SLinus Torvalds } 214272fa5997SVasiliy Kulikov current->flags &= ~PF_NPROC_EXCEEDED; 21431da177e4SLinus Torvalds 21441da177e4SLinus Torvalds /* 21451da177e4SLinus Torvalds * If multiple threads are within copy_process(), then this check 21461da177e4SLinus Torvalds * triggers too late. This doesn't hurt, the check is only there 21471da177e4SLinus Torvalds * to stop root fork bombs. 21481da177e4SLinus Torvalds */ 214904ec93feSLi Zefan retval = -EAGAIN; 2150c17d1a3aSWeilong Chen if (data_race(nr_threads >= max_threads)) 21511da177e4SLinus Torvalds goto bad_fork_cleanup_count; 21521da177e4SLinus Torvalds 2153ca74e92bSShailabh Nagar delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 2154a8ea6fc9SFrederic Weisbecker p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY); 2155514ddb44SDavid Rientjes p->flags |= PF_FORKNOEXEC; 21561da177e4SLinus Torvalds INIT_LIST_HEAD(&p->children); 21571da177e4SLinus Torvalds INIT_LIST_HEAD(&p->sibling); 2158f41d911fSPaul E. 
McKenney rcu_copy_process(p); 21591da177e4SLinus Torvalds p->vfork_done = NULL; 21601da177e4SLinus Torvalds spin_lock_init(&p->alloc_lock); 21611da177e4SLinus Torvalds 21621da177e4SLinus Torvalds init_sigpending(&p->pending); 21631da177e4SLinus Torvalds 216464861634SMartin Schwidefsky p->utime = p->stime = p->gtime = 0; 216540565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 216664861634SMartin Schwidefsky p->utimescaled = p->stimescaled = 0; 216740565b5aSStanislaw Gruszka #endif 21689d7fb042SPeter Zijlstra prev_cputime_init(&p->prev_cputime); 21699d7fb042SPeter Zijlstra 21706a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 2171bac5b6b6SFrederic Weisbecker seqcount_init(&p->vtime.seqcount); 2172bac5b6b6SFrederic Weisbecker p->vtime.starttime = 0; 2173bac5b6b6SFrederic Weisbecker p->vtime.state = VTIME_INACTIVE; 21746a61671bSFrederic Weisbecker #endif 21756a61671bSFrederic Weisbecker 21760f212204SJens Axboe #ifdef CONFIG_IO_URING 21770f212204SJens Axboe p->io_uring = NULL; 21780f212204SJens Axboe #endif 21790f212204SJens Axboe 2180a3a2e76cSKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING) 2181a3a2e76cSKAMEZAWA Hiroyuki memset(&p->rss_stat, 0, sizeof(p->rss_stat)); 2182a3a2e76cSKAMEZAWA Hiroyuki #endif 2183172ba844SBalbir Singh 21846976675dSArjan van de Ven p->default_timer_slack_ns = current->timer_slack_ns; 21856976675dSArjan van de Ven 2186eb414681SJohannes Weiner #ifdef CONFIG_PSI 2187eb414681SJohannes Weiner p->psi_flags = 0; 2188eb414681SJohannes Weiner #endif 2189eb414681SJohannes Weiner 21905995477aSAndrea Righi task_io_accounting_init(&p->ioac); 21911da177e4SLinus Torvalds acct_clear_integrals(p); 21921da177e4SLinus Torvalds 21933a245c0fSThomas Gleixner posix_cputimers_init(&p->posix_cputimers); 21941da177e4SLinus Torvalds 21951da177e4SLinus Torvalds p->io_context = NULL; 2196c0b0ae8aSRichard Guy Briggs audit_set_context(p, NULL); 2197b4f48b63SPaul Menage cgroup_fork(p); 2198343f4c49SEric W. Biederman if (args->kthread) { 219940966e31SEric W. Biederman if (!set_kthread_struct(p)) 2200ff8288ffSEric W. Biederman goto bad_fork_cleanup_delayacct; 220140966e31SEric W. Biederman } 22021da177e4SLinus Torvalds #ifdef CONFIG_NUMA 2203846a16bfSLee Schermerhorn p->mempolicy = mpol_dup(p->mempolicy); 22041da177e4SLinus Torvalds if (IS_ERR(p->mempolicy)) { 22051da177e4SLinus Torvalds retval = PTR_ERR(p->mempolicy); 22061da177e4SLinus Torvalds p->mempolicy = NULL; 2207ff8288ffSEric W. Biederman goto bad_fork_cleanup_delayacct; 22081da177e4SLinus Torvalds } 22091da177e4SLinus Torvalds #endif 2210778d3b0fSMichal Hocko #ifdef CONFIG_CPUSETS 2211778d3b0fSMichal Hocko p->cpuset_mem_spread_rotor = NUMA_NO_NODE; 2212778d3b0fSMichal Hocko p->cpuset_slab_spread_rotor = NUMA_NO_NODE; 2213b7505861SAhmed S. 
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	memset(&p->irqtrace, 0, sizeof(p->irqtrace));
	p->irqtrace.hardirq_disable_ip = _THIS_IP_;
	p->irqtrace.softirq_enable_ip = _THIS_IP_;
	p->softirqs_enabled = 1;
	p->softirq_context = 0;
#endif

	p->pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
	lockdep_init_task(p);
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif
#ifdef CONFIG_BPF_SYSCALL
	RCU_INIT_POINTER(p->bpf_storage, NULL);
	p->bpf_ctx = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p, clone_flags);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = security_task_alloc(p, clone_flags);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_security;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
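	/*
	 * The copy_*() cascade above follows the kernel's staged-init
	 * pattern: each step has a matching cleanup label further down, and
	 * a failure at step N unwinds steps N-1..1 in reverse order. A
	 * minimal sketch of the idiom (names hypothetical):
	 *
	 *	err = init_a(obj);
	 *	if (err)
	 *		goto out;
	 *	err = init_b(obj);
	 *	if (err)
	 *		goto undo_a;
	 *	return 0;
	 *	undo_a:
	 *		teardown_a(obj);
	 *	out:
	 *		return err;
	 */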
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(p, args);
	if (retval)
		goto bad_fork_cleanup_io;

	stackleak_task_init(p);

	if (pid != &init_struct_pid) {
		pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
				args->set_tid_size);
		if (IS_ERR(pid)) {
			retval = PTR_ERR(pid);
			goto bad_fork_cleanup_thread;
		}
	}

	/*
	 * This has to happen after we've potentially unshared the file
	 * descriptor table (so that the pidfd doesn't leak into the child
	 * if the fd table isn't shared).
	 */
	if (clone_flags & CLONE_PIDFD) {
		retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
		if (retval < 0)
			goto bad_fork_free_pid;

		pidfd = retval;

		pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid,
					      O_RDWR | O_CLOEXEC);
		if (IS_ERR(pidfile)) {
			put_unused_fd(pidfd);
			retval = PTR_ERR(pidfile);
			goto bad_fork_free_pid;
		}
		get_pid(pid);	/* held by pidfile now */

		retval = put_user(pidfd, args->pidfd);
		if (retval)
			goto bad_fork_put_pidfd;
	}
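	/*
	 * What the CLONE_PIDFD block above gives userspace: the new fd is
	 * written through args->pidfd before the child runs. A minimal
	 * clone3(2) sketch (illustrative, error handling omitted):
	 *
	 *	#include <linux/sched.h>	// struct clone_args
	 *	#include <sys/syscall.h>
	 *	#include <signal.h>
	 *	#include <unistd.h>
	 *	#include <poll.h>
	 *
	 *	int pidfd = -1;
	 *	struct clone_args ca = {
	 *		.flags		= CLONE_PIDFD,
	 *		.pidfd		= (__u64)(uintptr_t)&pidfd,
	 *		.exit_signal	= SIGCHLD,
	 *	};
	 *	pid_t pid = syscall(SYS_clone3, &ca, sizeof(ca));
	 *	// parent: the pidfd becomes readable once the child exits
	 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
	 *	poll(&pfd, 1, -1);
	 */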
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
	futex_init_task(p);

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		sas_ss_reset(p);

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_task_syscall_work(p, SYSCALL_TRACE);
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	clear_task_syscall_work(p, SYSCALL_EMU);
#endif
	clear_tsk_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;
	clear_posix_cputimers_work(p);

#ifdef CONFIG_KRETPROBES
	p->kretprobe_instances.first = NULL;
#endif
#ifdef CONFIG_RETHOOK
	p->rethooks.first = NULL;
#endif

	/*
	 * Ensure that the cgroup subsystem policies allow the new process to be
	 * forked. It should be noted that the new process's css_set can be changed
	 * between here and cgroup_post_fork() if an organisation operation is in
	 * progress.
	 */
	retval = cgroup_can_fork(p, args);
	if (retval)
		goto bad_fork_put_pidfd;

	/*
	 * Now that the cgroups are pinned, re-clone the parent cgroup and put
	 * the new task on the correct runqueue. All this *before* the task
	 * becomes visible.
	 *
	 * This isn't part of ->can_fork() because while the re-cloning is
	 * cgroup specific, it unconditionally needs to place the task on a
	 * runqueue.
	 */
	sched_cgroup_fork(p, args);

	/*
	 * From this point on we must avoid any synchronous user-space
	 * communication until we take the tasklist-lock. In particular, we do
	 * not want user-space to be able to predict the process start-time by
	 * stalling fork(2) after we recorded the start_time but before it is
	 * visible to the system.
	 */

	p->start_time = ktime_get_ns();
	p->start_boottime = ktime_get_boottime_ns();
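	/*
	 * The two timestamps recorded above use different clock bases:
	 * ktime_get_ns() follows CLOCK_MONOTONIC (frozen across suspend),
	 * while ktime_get_boottime_ns() follows CLOCK_BOOTTIME (keeps
	 * counting across suspend). The corresponding userspace clocks,
	 * as a sketch:
	 *
	 *	#include <time.h>
	 *
	 *	struct timespec mono, boot;
	 *	clock_gettime(CLOCK_MONOTONIC, &mono);	// base of p->start_time
	 *	clock_gettime(CLOCK_BOOTTIME, &boot);	// base of p->start_boottime
	 */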
	/*
	 * Make it visible to the rest of the system, but don't wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
		if (clone_flags & CLONE_THREAD)
			p->exit_signal = -1;
		else
			p->exit_signal = current->group_leader->exit_signal;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
		p->exit_signal = args->exit_signal;
	}

	klp_copy_process(p);

	sched_core_fork(p);

	spin_lock(&current->sighand->siglock);

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	rv_task_fork(p);

	rseq_fork(p, clone_flags);

	/* Don't start children in a dying pid namespace */
	if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
		retval = -ENOMEM;
		goto bad_fork_cancel_cgroup;
	}

	/* Let kill terminate clone/fork in the middle */
	if (fatal_signal_pending(current)) {
		retval = -EINTR;
		goto bad_fork_cancel_cgroup;
	}

	init_task_pid_links(p);
	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_TGID, pid);
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}
			p->signal->shared_pending.signal = delayed.signal;
			p->signal->tty = tty_kref_get(current->signal->tty);
			/*
			 * Inherit has_child_subreaper flag under the same
			 * tasklist_lock with adding child to the process tree
			 * for propagate_has_child_subreaper optimization.
			 */
			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
							 p->real_parent->signal->is_child_subreaper;
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_TGID);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			refcount_inc(&current->signal->sigcnt);
			task_join_group_stop(p);
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}
	total_forks++;
	hlist_del_init(&delayed.node);
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	if (pidfile)
		fd_install(pidfd, pidfile);

	proc_fork_connector(p);
	sched_post_fork(p);
	cgroup_post_fork(p, args);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);

	copy_oom_score_adj(clone_flags, p);

	return p;

bad_fork_cancel_cgroup:
	sched_core_free(p);
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	cgroup_cancel_fork(p, args);
bad_fork_put_pidfd:
	if (clone_flags & CLONE_PIDFD) {
		fput(pidfile);
		put_unused_fd(pidfd);
	}
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_thread:
	exit_thread(p);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		mm_clear_owner(p->mm, p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
#endif
bad_fork_cleanup_delayacct:
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	exit_creds(p);
bad_fork_free:
	WRITE_ONCE(p->__state, TASK_DEAD);
	exit_task_stack_account(p);
	put_task_stack(p);
	delayed_free_task(p);
fork_out:
	spin_lock_irq(&current->sighand->siglock);
	hlist_del_init(&delayed.node);
	spin_unlock_irq(&current->sighand->siglock);
	return ERR_PTR(retval);
}
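/*
 * copy_process() reports failure through the ERR_PTR() convention rather
 * than returning NULL: the -errno value is encoded in the pointer itself.
 * The caller-side idiom, as kernel_clone() uses below (sketch):
 *
 *	struct task_struct *p = copy_process(NULL, trace, node, args);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);	// decode back to a negative errno
 */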
static inline void init_idle_pids(struct task_struct *idle)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
		init_task_pid(idle, type, &init_struct_pid);
	}
}

static int idle_dummy(void *dummy)
{
	/* This function is never called */
	return 0;
}

struct task_struct * __init fork_idle(int cpu)
{
	struct task_struct *task;
	struct kernel_clone_args args = {
		.flags		= CLONE_VM,
		.fn		= &idle_dummy,
		.fn_arg		= NULL,
		.kthread	= 1,
		.idle		= 1,
	};

	task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
	if (!IS_ERR(task)) {
		init_idle_pids(task);
		init_idle(task, cpu);
	}

	return task;
}

struct mm_struct *copy_init_mm(void)
{
	return dup_mm(NULL, &init_mm);
}

/*
 * This is like kernel_clone(), but shaved down and tailored to just
 * creating io_uring workers. It returns a created task, or an error pointer.
 * The returned task is inactive, and the caller must fire it up through
 * wake_up_new_task(p). All signals are blocked in the created task.
 */
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
{
	unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
				CLONE_IO;
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
		.io_thread	= 1,
	};

	return copy_process(NULL, 0, node, &args);
}
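/*
 * Typical caller-side use of create_io_thread(), per the comment above
 * (a sketch; io_uring's real worker setup does more, and worker_fn /
 * worker_data are placeholders for the caller's entry point and argument):
 *
 *	struct task_struct *tsk;
 *
 *	tsk = create_io_thread(worker_fn, worker_data, NUMA_NO_NODE);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_new_task(tsk);	// the returned task starts out inactive
 */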
/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 *
 * args->exit_signal is expected to be checked for sanity by the caller.
 */
pid_t kernel_clone(struct kernel_clone_args *args)
{
	u64 clone_flags = args->flags;
	struct completion vfork;
	struct pid *pid;
	struct task_struct *p;
	int trace = 0;
	pid_t nr;

	/*
	 * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument
	 * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are
	 * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate
	 * field in struct clone_args and it still doesn't make sense to have
	 * them both point at the same memory location. Performing this check
	 * here has the advantage that we don't need to have a separate helper
	 * to check for legacy clone().
	 */
	if ((args->flags & CLONE_PIDFD) &&
	    (args->flags & CLONE_PARENT_SETTID) &&
	    (args->pidfd == args->parent_tid))
		return -EINVAL;

	/*
	 * Determine whether and which event to report to ptracer. When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if (args->exit_signal != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(NULL, trace, NUMA_NO_NODE, args);
	add_latent_entropy();

	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	trace_sched_process_fork(current, p);

	pid = get_task_pid(p, PIDTYPE_PID);
	nr = pid_vnr(pid);

	if (clone_flags & CLONE_PARENT_SETTID)
		put_user(nr, args->parent_tid);

	if (clone_flags & CLONE_VFORK) {
		p->vfork_done = &vfork;
		init_completion(&vfork);
		get_task_struct(p);
	}

	if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
		/* lock the task to synchronize with memcg migration */
		task_lock(p);
		lru_gen_add_mm(p->mm);
		task_unlock(p);
	}

	wake_up_new_task(p);

	/* forking complete and child started to run, tell ptracer */
	if (unlikely(trace))
		ptrace_event_pid(trace, pid);

	if (clone_flags & CLONE_VFORK) {
		if (!wait_for_vfork_done(p, &vfork))
			ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
	}

	put_pid(pid);
	return nr;
}

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
		.kthread	= 1,
	};

	return kernel_clone(&args);
}
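/*
 * A sketch of kernel_thread() from a caller's perspective (most in-kernel
 * users should go through kthread_run() instead, which layers naming and
 * stop handling on top; my_thread_fn is hypothetical):
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 */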
/*
 * Create a user mode thread.
 */
pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
	};

	return kernel_clone(&args);
}

#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	struct kernel_clone_args args = {
		.exit_signal = SIGCHLD,
	};

	return kernel_clone(&args);
#else
	/* cannot support in nommu mode */
	return -EINVAL;
#endif
}
#endif

#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	struct kernel_clone_args args = {
		.flags		= CLONE_VFORK | CLONE_VM,
		.exit_signal	= SIGCHLD,
	};

	return kernel_clone(&args);
}
#endif

#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 unsigned long, tls,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#endif
{
	struct kernel_clone_args args = {
		.flags		= (lower_32_bits(clone_flags) & ~CSIGNAL),
		.pidfd		= parent_tidptr,
		.child_tid	= child_tidptr,
		.parent_tid	= parent_tidptr,
		.exit_signal	= (lower_32_bits(clone_flags) & CSIGNAL),
		.stack		= newsp,
		.tls		= tls,
	};

	return kernel_clone(&args);
}
#endif
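/*
 * The argument-order variants above exist because architectures
 * historically disagreed on the raw clone() calling convention; userspace
 * normally goes through the glibc wrapper, which hides this. A minimal
 * sketch (illustrative, error handling omitted):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	#define STACK_SZ (1024 * 1024)
 *
 *	static int child_fn(void *arg) { return 0; }
 *
 *	char *stack = malloc(STACK_SZ);
 *	// glibc expects the *top* of the stack on downward-growing arches
 *	pid_t pid = clone(child_fn, stack + STACK_SZ, CLONE_VM | SIGCHLD, NULL);
 */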
#ifdef __ARCH_WANT_SYS_CLONE3

noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
					      struct clone_args __user *uargs,
					      size_t usize)
{
	int err;
	struct clone_args args;
	pid_t *kset_tid = kargs->set_tid;

	BUILD_BUG_ON(offsetofend(struct clone_args, tls) !=
		     CLONE_ARGS_SIZE_VER0);
	BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) !=
		     CLONE_ARGS_SIZE_VER1);
	BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
		     CLONE_ARGS_SIZE_VER2);
	BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);

	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
		return -EINVAL;

	err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
	if (err)
		return err;

	if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL))
		return -EINVAL;

	if (unlikely(!args.set_tid && args.set_tid_size > 0))
		return -EINVAL;

	if (unlikely(args.set_tid && args.set_tid_size == 0))
		return -EINVAL;

	/*
	 * Verify that higher 32bits of exit_signal are unset and that
	 * it is a valid signal
	 */
	if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
		     !valid_signal(args.exit_signal)))
		return -EINVAL;

	if ((args.flags & CLONE_INTO_CGROUP) &&
	    (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2))
		return -EINVAL;

	*kargs = (struct kernel_clone_args){
		.flags		= args.flags,
		.pidfd		= u64_to_user_ptr(args.pidfd),
		.child_tid	= u64_to_user_ptr(args.child_tid),
		.parent_tid	= u64_to_user_ptr(args.parent_tid),
		.exit_signal	= args.exit_signal,
		.stack		= args.stack,
		.stack_size	= args.stack_size,
		.tls		= args.tls,
		.set_tid_size	= args.set_tid_size,
		.cgroup		= args.cgroup,
	};

	if (args.set_tid &&
		copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
			       (kargs->set_tid_size * sizeof(pid_t))))
		return -EFAULT;

	kargs->set_tid = kset_tid;

	return 0;
}
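/*
 * The set_tid/set_tid_size pair copied above lets a sufficiently
 * privileged caller (checkpoint/restore being the motivating user) pick
 * the child's PID in each level of nested PID namespaces. A sketch,
 * assuming two namespace levels and the required capability; tids[0] is
 * the PID in the child's own (most nested) namespace, later entries walk
 * outward:
 *
 *	pid_t tids[2] = { 7, 31496 };
 *	struct clone_args ca = {
 *		.set_tid	= (__u64)(uintptr_t)tids,
 *		.set_tid_size	= 2,
 *		.exit_signal	= SIGCHLD,
 *	};
 *	syscall(SYS_clone3, &ca, sizeof(ca));
 */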
/**
 * clone3_stack_valid - check and prepare stack
 * @kargs: kernel clone args
 *
 * Verify that the stack arguments userspace gave us are sane.
 * In addition, set the stack direction for userspace since it's easy for us to
 * determine.
 */
static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
{
	if (kargs->stack == 0) {
		if (kargs->stack_size > 0)
			return false;
	} else {
		if (kargs->stack_size == 0)
			return false;

		if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
			return false;

#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64)
		kargs->stack += kargs->stack_size;
#endif
	}

	return true;
}

static bool clone3_args_valid(struct kernel_clone_args *kargs)
{
	/* Verify that no unknown flags are passed along. */
	if (kargs->flags &
	    ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP))
		return false;

	/*
	 * - make the CLONE_DETACHED bit reusable for clone3
	 * - make the CSIGNAL bits reusable for clone3
	 */
	if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
		return false;

	if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
	    (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND))
		return false;

	if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
	    kargs->exit_signal)
		return false;

	if (!clone3_stack_valid(kargs))
		return false;

	return true;
}
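/*
 * Note the contrast with legacy clone(): clone3() takes the *lowest*
 * stack address plus a size, and clone3_stack_valid() above adjusts it to
 * the top on downward-growing architectures. A userspace sketch:
 *
 *	#include <sys/mman.h>
 *
 *	#define STACK_SZ (1024 * 1024)
 *
 *	void *stack = mmap(NULL, STACK_SZ, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
 *	struct clone_args ca = {
 *		.flags		= CLONE_VM,
 *		.stack		= (__u64)(uintptr_t)stack,	// base, not top
 *		.stack_size	= STACK_SZ,
 *		.exit_signal	= SIGCHLD,
 *	};
 */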
/**
 * clone3 - create a new process with specific properties
 * @uargs: argument structure
 * @size: size of @uargs
 *
 * clone3() is the extensible successor to clone()/clone2().
 * It takes a struct as argument that is versioned by its size.
 *
 * Return: On success, a positive PID for the child process.
 *         On error, a negative errno number.
 */
SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
{
	int err;

	struct kernel_clone_args kargs;
	pid_t set_tid[MAX_PID_NS_LEVEL];

	kargs.set_tid = set_tid;

	err = copy_clone_args_from_user(&kargs, uargs, size);
	if (err)
		return err;

	if (!clone3_args_valid(&kargs))
		return -EINVAL;

	return kernel_clone(&kargs);
}
#endif

void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{
	struct task_struct *leader, *parent, *child;
	int res;

	read_lock(&tasklist_lock);
	leader = top = top->group_leader;
down:
	for_each_thread(leader, parent) {
		list_for_each_entry(child, &parent->children, sibling) {
			res = visitor(child, data);
			if (res) {
				if (res < 0)
					goto out;
				leader = child;
				goto down;
			}
up:
			;
		}
	}

	if (leader != top) {
		child = leader;
		parent = child->real_parent;
		leader = parent->group_leader;
		goto up;
	}
out:
	read_unlock(&tasklist_lock);
}
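/*
 * Sketch of a walk_process_tree() visitor (hypothetical caller). Per the
 * loop above: a positive return descends into that child's subtree, zero
 * moves on to the next sibling, and a negative value aborts the walk.
 *
 *	static int count_descendants(struct task_struct *t, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 1;	// keep descending
 *	}
 *
 *	int n = 0;
 *	walk_process_tree(current, count_descendants, &n);
 */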
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	unsigned int mm_size;

	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);

	/*
	 * The mm_cpumask is located at the end of mm_struct, and is
	 * dynamically sized based on the maximum CPU number this system
	 * can have, taking hotplug into account (nr_cpu_ids).
	 */
	mm_size = sizeof(struct mm_struct) + cpumask_size();

	mm_cachep = kmem_cache_create_usercopy("mm_struct",
			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			offsetof(struct mm_struct, saved_auxv),
			sizeof_field(struct mm_struct, saved_auxv),
			NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
	mmap_init();
	nsproxy_cache_init();
}
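/*
 * The caches above follow the standard slab pattern. A minimal sketch of
 * the same API for a hypothetical object type (struct foo is illustrative):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_PANIC | SLAB_ACCOUNT);
 *
 *	struct foo *f = kmem_cache_zalloc(foo_cachep, GFP_KERNEL);
 *	// ... use f ...
 *	kmem_cache_free(foo_cachep, f);
 */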
/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
				CLONE_NEWTIME))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare. Note that unsharing the address space or the
	 * signal handlers also needs to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (refcount_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
	       struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, max_fds, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by kernel_clone() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
int ksys_unshare(unsigned long unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace, we must also unshare the thread
	 * group and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_cred) {
		err = set_cred_ucounts(new_cred);
		if (err)
			goto bad_unshare_cleanup_cred;
	}

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}
		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd)
			swap(current->files, new_fd);

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	return ksys_unshare(unshare_flags);
}
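/*
 * Userspace view of the unshare path above, as a minimal sketch
 * (CLONE_NEWUTS needs CAP_SYS_ADMIN in the caller's user namespace):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <unistd.h>
 *
 *	if (unshare(CLONE_NEWUTS) == 0)
 *		sethostname("sandbox", 7);	// visible only in the new UTS ns
 */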
/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(void)
{
	struct task_struct *task = current;
	struct files_struct *old, *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
	if (error || !copy)
		return error;

	old = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	put_files_struct(old);
	return 0;
}

int sysctl_max_threads(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = 1;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_threads = threads;

	return 0;
}
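/*
 * sysctl_max_threads() backs the kernel.threads-max sysctl. A userspace
 * sketch of reading it (writes require privilege and are bounds-checked
 * against [1, MAX_THREADS] by the handler above):
 *
 *	#include <stdio.h>
 *
 *	int threads_max;
 *	FILE *f = fopen("/proc/sys/kernel/threads-max", "r");
 *	fscanf(f, "%d", &threads_max);
 *	fclose(f);
 */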