// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/kmsan.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/tty.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>
#include <linux/stackprotector.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

static int max_threads;		/* tunable limit on nr_threads */

#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#  ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

struct vm_stack {
	struct rcu_head rcu;
	struct vm_struct *stack_vm_area;
};

static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
{
	unsigned int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
			continue;
		return true;
	}
	return false;
}

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);

	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
		return;

	vfree(vm_stack);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct vm_stack *vm_stack = tsk->stack;

	vm_stack->stack_vm_area = tsk->stack_vm_area;
	call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
}

static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}
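/*
 * Because stacks may be handed out from the per-CPU cache rather than
 * freshly allocated, their memcg charge is applied and dropped by hand
 * each time a stack changes hands instead of via __GFP_ACCOUNT.
 */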
static int memcg_charge_kernel_stack(struct vm_struct *vm)
{
	int i;
	int ret;

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
		if (ret)
			goto err;
	}
	return 0;
err:
	/*
	 * If memcg_kmem_charge_page() fails, page's memory cgroup pointer is
	 * NULL, and memcg_kmem_uncharge_page() in free_thread_stack() will
	 * ignore this page.
	 */
	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
		memcg_kmem_uncharge_page(vm->pages[i], 0);
	return ret;
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct vm_struct *vm;
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;

		/* Reset stack metadata. */
		kasan_unpoison_range(s->addr, THREAD_SIZE);

		stack = kasan_reset_tag(s->addr);

		/* Clear stale pointers from reused stack. */
		memset(stack, 0, THREAD_SIZE);

		if (memcg_charge_kernel_stack(s)) {
			vfree(s->addr);
			return -ENOMEM;
		}

		tsk->stack_vm_area = s;
		tsk->stack = stack;
		return 0;
	}
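	/* No cached stack was available: fall back to a fresh allocation. */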
	/*
	 * Allocated stacks are cached and later reused by new threads,
	 * so memcg accounting is performed manually on assigning/releasing
	 * stacks to tasks. Drop __GFP_ACCOUNT.
	 */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));
	if (!stack)
		return -ENOMEM;

	vm = find_vm_area(stack);
	if (memcg_charge_kernel_stack(vm)) {
		vfree(stack);
		return -ENOMEM;
	}
	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	tsk->stack_vm_area = vm;
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return 0;
}

static void free_thread_stack(struct task_struct *tsk)
{
	if (!try_release_thread_stack_to_cache(tsk->stack_vm_area))
		thread_stack_delayed_free(tsk);

	tsk->stack = NULL;
	tsk->stack_vm_area = NULL;
}

#  else /* !CONFIG_VMAP_STACK */

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	__free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		tsk->stack = kasan_reset_tag(page_address(page));
		return 0;
	}
	return -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}
#  endif /* CONFIG_VMAP_STACK */
# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */

static struct kmem_cache *thread_stack_cache;

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	kmem_cache_free(thread_stack_cache, rh);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;
	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
					THREAD_SIZE, THREAD_SIZE, 0, 0,
					THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}

# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
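/*
 * The architecture provides its own thread stack allocator; the wrappers
 * below only keep tsk->stack in sync with it.
 */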
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;

	stack = arch_alloc_thread_stack_node(tsk, node);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	arch_free_thread_stack(tsk);
	tsk->stack = NULL;
}

#endif /* !CONFIG_ARCH_THREAD_STACK_ALLOCATOR */

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma)
		vma_init(vma, mm);
	return vma;
}
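/*
 * Duplicate an existing VMA, e.g. for fork(). Only the struct itself is
 * copied here; callers such as dup_mmap() take the file reference and set
 * up the mempolicy and anon_vma state for the new copy.
 */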
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (new) {
		ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
		ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
		/*
		 * orig->shared.rb may be modified concurrently, but the clone
		 * will be reinitialized.
		 */
		data_race(memcpy(new, orig, sizeof(*new)));
		INIT_LIST_HEAD(&new->anon_vma_chain);
		dup_anon_vma_name(orig, new);
	}
	return new;
}

void vm_area_free(struct vm_area_struct *vma)
{
	free_anon_vma_name(vma);
	kmem_cache_free(vm_area_cachep, vma);
}

static void account_kernel_stack(struct task_struct *tsk, int account)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm = task_stack_vm_area(tsk);
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
					      account * (PAGE_SIZE / 1024));
	} else {
		void *stack = task_stack_page(tsk);

		/* All stack pages are in the same node. */
		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	}
}

void exit_task_stack_account(struct task_struct *tsk)
{
	account_kernel_stack(tsk, -1);

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm;
		int i;

		vm = task_stack_vm_area(tsk);
		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			memcg_kmem_uncharge_page(vm->pages[i], 0);
	}
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	free_thread_stack(tsk);
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
#ifdef CONFIG_SECCOMP
	WARN_ON_ONCE(tsk->seccomp.filter);
#endif
	release_user_cpus_ptr(tsk);
	scs_release(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct file *exe_file;

	exe_file = get_mm_exe_file(oldmm);
	RCU_INIT_POINTER(mm->exe_file, exe_file);
	/*
	 * We depend on the oldmm having properly denied write access to the
	 * exe_file already.
	 */
	if (exe_file && deny_write_access(exe_file))
		pr_warn_once("deny_write_access() failed in %s\n", __func__);
}

#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp;
	int retval;
	unsigned long charge = 0;
	LIST_HEAD(uf);
	VMA_ITERATOR(old_vmi, oldmm, 0);
	VMA_ITERATOR(vmi, mm, 0);

	uprobe_start_dup_mmap();
	if (mmap_write_lock_killable(oldmm)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
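	/* Both mms are now write-locked for the duration of the copy. */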

	/* No ordering required: file already has been exposed. */
	dup_mm_exe_file(mm, oldmm);

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	khugepaged_fork(mm, oldmm);

	retval = vma_iter_bulk_alloc(&vmi, oldmm->map_count);
	if (retval)
		goto out;

	for_each_vma(old_vmi, mpnt) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto loop_out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		vm_flags_clear(tmp, VM_LOCKED_MASK);
		file = tmp->vm_file;
		if (file) {
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Copy/update hugetlb private vma information.
		 */
		if (is_vm_hugetlb_page(tmp))
			hugetlb_dup_vma_private(tmp);

		/* Link the vma into the MT */
		if (vma_iter_bulk_store(&vmi, tmp))
			goto fail_nomem_vmi_store;

		mm->map_count++;
		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto loop_out;
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
loop_out:
	vma_iter_free(&vmi);
out:
	mmap_write_unlock(mm);
	flush_tlb_mm(oldmm);
	mmap_write_unlock(oldmm);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;
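	/*
	 * The labels below unwind a partially constructed 'tmp' in the
	 * reverse order of its setup, then rejoin the common exit path.
	 */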
fail_nomem_vmi_store:
	unlink_anon_vmas(tmp);
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto loop_out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mmap_write_lock(oldmm);
	dup_mm_exe_file(mm, oldmm);
	mmap_write_unlock(oldmm);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

static void check_mm(struct mm_struct *mm)
{
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
			 "Please make sure 'struct resident_page_types[]' is updated as well");

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = percpu_counter_sum(&mm->rss_stat[i]);

		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
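/*
 * An mm has two reference counts: mm_users counts users of the address
 * space itself, while mm_count pins the bare mm_struct (e.g. for lazy-TLB
 * kernel threads). __mmdrop() below runs only when mm_count drops to zero.
 */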
/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	int i;

	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);
	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_subscriptions_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	mm_pasid_drop(mm);

	for (i = 0; i < NR_MM_COUNTERS; i++)
		percpu_counter_destroy(&mm->rss_stat[i]);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (refcount_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(refcount_read(&tsk->usage));
	WARN_ON(tsk == current);

	io_uring_free(tsk);
	cgroup_free(tsk);
	task_numa_free(tsk, true);
	security_task_free(tsk);
	bpf_task_storage_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);
	sched_core_free(tsk);
	free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;
	unsigned long nr_pages = totalram_pages();

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif
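/*
 * With hardened usercopy, only the thread_struct region of task_struct may
 * be copied to or from user space; the helper below computes that window
 * for the slab cache's usercopy whitelist.
 */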
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* Fetch thread_struct whitelist for the architecture. */
	arch_thread_struct_whitelist(offset, size);

	/*
	 * Handle zero-sized whitelist or empty thread_struct, otherwise
	 * adjust offset to position of thread_struct in task_struct.
	 */
	if (unlikely(*size == 0))
		*offset = 0;
	else
		*offset += offsetof(struct task_struct, thread);
}
#endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */

void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
	unsigned long useroffset, usersize;

	/* create a slab on which task_structs can be allocated */
	task_struct_whitelist(&useroffset, &usersize);
	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_ACCOUNT,
			useroffset, usersize, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++)
		init_user_ns.ucount_max[i] = max_threads/2;

	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC,      RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE,   RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK,    RLIM_INFINITY);

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	scs_init();

	lockdep_init_task(&init_task);
	uprobes_init();
}

int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}
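/*
 * Allocate a new task_struct plus kernel stack for a child and copy @orig
 * into them; copy_process() then fills in everything that must differ
 * between parent and child.
 */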
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto free_tsk;

	err = alloc_thread_stack_node(tsk, node);
	if (err)
		goto free_tsk;

#ifdef CONFIG_THREAD_INFO_IN_TASK
	refcount_set(&tsk->stack_refcount, 1);
#endif
	account_kernel_stack(tsk, 1);

	err = scs_prepare(tsk, node);
	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);
	clear_syscall_work_syscall_user_dispatch(tsk);

#ifdef CONFIG_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif
	if (orig->cpus_ptr == &orig->cpus_mask)
		tsk->cpus_ptr = &tsk->cpus_mask;
	dup_user_cpus_ptr(tsk, orig, node);

	/*
	 * One for the user space visible state that goes away when reaped.
	 * One for the scheduler.
	 */
	refcount_set(&tsk->rcu_users, 2);
	/* One for the rcu users */
	refcount_set(&tsk->usage, 1);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;
	tsk->worker_private = NULL;

	kcov_task_init(tsk);
	kmsan_task_create(tsk);
	kmap_local_fork(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

#ifdef CONFIG_BLK_CGROUP
	tsk->throttle_disk = NULL;
	tsk->use_memdelay = 0;
#endif

#ifdef CONFIG_IOMMU_SVA
	tsk->pasid_activated = 0;
#endif

#ifdef CONFIG_MEMCG
	tsk->active_memcg = NULL;
#endif

#ifdef CONFIG_CPU_SUP_INTEL
	tsk->reported_split_lock = 0;
#endif

#ifdef CONFIG_SCHED_MM_CID
	tsk->mm_cid = -1;
	tsk->mm_cid_active = 0;
#endif
	return tsk;

free_stack:
	exit_task_stack_account(tsk);
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static __always_inline void mm_clear_owner(struct mm_struct *mm,
					   struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	if (mm->owner == p)
		WRITE_ONCE(mm->owner, NULL);
#endif
}
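/*
 * Under CONFIG_MEMCG, mm->owner is the task whose memory cgroup charges
 * against this mm are attributed to.
 */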
Davydov mm->owner = p; 110633144e84SVladimir Davydov #endif 110733144e84SVladimir Davydov } 110833144e84SVladimir Davydov 1109355627f5SEric Biggers static void mm_init_uprobes_state(struct mm_struct *mm) 1110355627f5SEric Biggers { 1111355627f5SEric Biggers #ifdef CONFIG_UPROBES 1112355627f5SEric Biggers mm->uprobes_state.xol_area = NULL; 1113355627f5SEric Biggers #endif 1114355627f5SEric Biggers } 1115355627f5SEric Biggers 1116bfedb589SEric W. Biederman static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, 1117bfedb589SEric W. Biederman struct user_namespace *user_ns) 11181da177e4SLinus Torvalds { 1119f1a79412SShakeel Butt int i; 1120f1a79412SShakeel Butt 1121d4af56c5SLiam R. Howlett mt_init_flags(&mm->mm_mt, MM_MT_FLAGS); 1122d4af56c5SLiam R. Howlett mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); 11231da177e4SLinus Torvalds atomic_set(&mm->mm_users, 1); 11241da177e4SLinus Torvalds atomic_set(&mm->mm_count, 1); 112557efa1feSJason Gunthorpe seqcount_init(&mm->write_protect_seq); 1126d8ed45c5SMichel Lespinasse mmap_init_lock(mm); 11271da177e4SLinus Torvalds INIT_LIST_HEAD(&mm->mmlist); 1128af5b0f6aSKirill A. Shutemov mm_pgtables_bytes_init(mm); 112941f727fdSVladimir Davydov mm->map_count = 0; 113041f727fdSVladimir Davydov mm->locked_vm = 0; 113170f8a3caSDavidlohr Bueso atomic64_set(&mm->pinned_vm, 0); 1132d559db08SKAMEZAWA Hiroyuki memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); 11331da177e4SLinus Torvalds spin_lock_init(&mm->page_table_lock); 113488aa7cc6SYang Shi spin_lock_init(&mm->arg_lock); 113541f727fdSVladimir Davydov mm_init_cpumask(mm); 1136858f0993SAlexey Dobriyan mm_init_aio(mm); 1137cf475ad2SBalbir Singh mm_init_owner(mm, p); 1138a6cbd440SFenghua Yu mm_pasid_init(mm); 11392b7e8665SEric Biggers RCU_INIT_POINTER(mm->exe_file, NULL); 1140984cfe4eSJason Gunthorpe mmu_notifier_subscriptions_init(mm); 114116af97dcSNadav Amit init_tlb_flush_pending(mm); 114241f727fdSVladimir Davydov #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 114341f727fdSVladimir Davydov mm->pmd_huge_pte = NULL; 114441f727fdSVladimir Davydov #endif 1145355627f5SEric Biggers mm_init_uprobes_state(mm); 114613db8c50SLiu Zixian hugetlb_count_init(mm); 11471da177e4SLinus Torvalds 1148a0715cc2SAlex Thorlton if (current->mm) { 1149a0715cc2SAlex Thorlton mm->flags = current->mm->flags & MMF_INIT_MASK; 1150a0715cc2SAlex Thorlton mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; 1151a0715cc2SAlex Thorlton } else { 1152a0715cc2SAlex Thorlton mm->flags = default_dump_filter; 11531da177e4SLinus Torvalds mm->def_flags = 0; 1154a0715cc2SAlex Thorlton } 1155a0715cc2SAlex Thorlton 115641f727fdSVladimir Davydov if (mm_alloc_pgd(mm)) 115741f727fdSVladimir Davydov goto fail_nopgd; 115878fb7466SPavel Emelianov 115941f727fdSVladimir Davydov if (init_new_context(p, mm)) 116041f727fdSVladimir Davydov goto fail_nocontext; 116141f727fdSVladimir Davydov 1162f1a79412SShakeel Butt for (i = 0; i < NR_MM_COUNTERS; i++) 1163f1a79412SShakeel Butt if (percpu_counter_init(&mm->rss_stat[i], 0, GFP_KERNEL_ACCOUNT)) 1164f1a79412SShakeel Butt goto fail_pcpu; 1165f1a79412SShakeel Butt 1166bfedb589SEric W. 
Biederman mm->user_ns = get_user_ns(user_ns); 1167bd74fdaeSYu Zhao lru_gen_init_mm(mm); 1168af7f588dSMathieu Desnoyers mm_init_cid(mm); 116941f727fdSVladimir Davydov return mm; 117041f727fdSVladimir Davydov 1171f1a79412SShakeel Butt fail_pcpu: 1172f1a79412SShakeel Butt while (i > 0) 1173f1a79412SShakeel Butt percpu_counter_destroy(&mm->rss_stat[--i]); 117441f727fdSVladimir Davydov fail_nocontext: 117541f727fdSVladimir Davydov mm_free_pgd(mm); 117641f727fdSVladimir Davydov fail_nopgd: 11771da177e4SLinus Torvalds free_mm(mm); 11781da177e4SLinus Torvalds return NULL; 11791da177e4SLinus Torvalds } 11801da177e4SLinus Torvalds 11811da177e4SLinus Torvalds /* 11821da177e4SLinus Torvalds * Allocate and initialize an mm_struct. 11831da177e4SLinus Torvalds */ 11841da177e4SLinus Torvalds struct mm_struct *mm_alloc(void) 11851da177e4SLinus Torvalds { 11861da177e4SLinus Torvalds struct mm_struct *mm; 11871da177e4SLinus Torvalds 11881da177e4SLinus Torvalds mm = allocate_mm(); 1189de03c72cSKOSAKI Motohiro if (!mm) 1190de03c72cSKOSAKI Motohiro return NULL; 1191de03c72cSKOSAKI Motohiro 11921da177e4SLinus Torvalds memset(mm, 0, sizeof(*mm)); 1193bfedb589SEric W. Biederman return mm_init(mm, current, current_user_ns()); 11941da177e4SLinus Torvalds } 11951da177e4SLinus Torvalds 1196ec8d7c14SMichal Hocko static inline void __mmput(struct mm_struct *mm) 11971da177e4SLinus Torvalds { 1198ec8d7c14SMichal Hocko VM_BUG_ON(atomic_read(&mm->mm_users)); 11990ae26f1bSAndrew Morton 1200d4b3b638SSrikar Dronamraju uprobe_clear_state(mm); 12011da177e4SLinus Torvalds exit_aio(mm); 12021c2fb7a4SAndrea Arcangeli ksm_exit(mm); 1203ba76149fSAndrea Arcangeli khugepaged_exit(mm); /* must run before exit_mmap */ 12041da177e4SLinus Torvalds exit_mmap(mm); 12056fcb52a5SAaron Lu mm_put_huge_zero_page(mm); 1206925d1c40SMatt Helsley set_mm_exe_file(mm, NULL); 12071da177e4SLinus Torvalds if (!list_empty(&mm->mmlist)) { 12081da177e4SLinus Torvalds spin_lock(&mmlist_lock); 12091da177e4SLinus Torvalds list_del(&mm->mmlist); 12101da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 12111da177e4SLinus Torvalds } 1212801460d0SHiroshi Shimamoto if (mm->binfmt) 1213801460d0SHiroshi Shimamoto module_put(mm->binfmt->module); 1214bd74fdaeSYu Zhao lru_gen_del_mm(mm); 12151da177e4SLinus Torvalds mmdrop(mm); 12161da177e4SLinus Torvalds } 1217ec8d7c14SMichal Hocko 1218ec8d7c14SMichal Hocko /* 1219ec8d7c14SMichal Hocko * Decrement the use count and release all resources for an mm. 
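 *
 * (Editorial note: the final mmput() funnels into __mmput() above, which
 * tears the mm down in order: uprobes, AIO, KSM and khugepaged state,
 * exit_mmap(), the exe_file reference, and finally mmdrop().)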
1220ec8d7c14SMichal Hocko  */
1221ec8d7c14SMichal Hocko void mmput(struct mm_struct *mm)
1222ec8d7c14SMichal Hocko {
1223ec8d7c14SMichal Hocko 	might_sleep();
1224ec8d7c14SMichal Hocko 
1225ec8d7c14SMichal Hocko 	if (atomic_dec_and_test(&mm->mm_users))
1226ec8d7c14SMichal Hocko 		__mmput(mm);
12271da177e4SLinus Torvalds }
12281da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(mmput);
12291da177e4SLinus Torvalds 
1230a1b2289cSSherry Yang #ifdef CONFIG_MMU
1231a1b2289cSSherry Yang static void mmput_async_fn(struct work_struct *work)
1232a1b2289cSSherry Yang {
1233a1b2289cSSherry Yang 	struct mm_struct *mm = container_of(work, struct mm_struct,
1234a1b2289cSSherry Yang 					    async_put_work);
1235a1b2289cSSherry Yang 
1236a1b2289cSSherry Yang 	__mmput(mm);
1237a1b2289cSSherry Yang }
1238a1b2289cSSherry Yang 
1239a1b2289cSSherry Yang void mmput_async(struct mm_struct *mm)
1240a1b2289cSSherry Yang {
1241a1b2289cSSherry Yang 	if (atomic_dec_and_test(&mm->mm_users)) {
1242a1b2289cSSherry Yang 		INIT_WORK(&mm->async_put_work, mmput_async_fn);
1243a1b2289cSSherry Yang 		schedule_work(&mm->async_put_work);
1244a1b2289cSSherry Yang 	}
1245a1b2289cSSherry Yang }
124685eaeb50SYishai Hadas EXPORT_SYMBOL_GPL(mmput_async);
1247a1b2289cSSherry Yang #endif
1248a1b2289cSSherry Yang 
124990f31d0eSKonstantin Khlebnikov /**
125090f31d0eSKonstantin Khlebnikov  * set_mm_exe_file - change a reference to the mm's executable file
125190f31d0eSKonstantin Khlebnikov  *
125290f31d0eSKonstantin Khlebnikov  * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
125390f31d0eSKonstantin Khlebnikov  *
12546e399cd1SDavidlohr Bueso  * Main users are mmput() and sys_execve(). Callers prevent concurrent
12556e399cd1SDavidlohr Bueso  * invocations: in mmput() nobody alive is left, in execve the task is
125635d7bdc8SDavid Hildenbrand  * single-threaded.
1257fe69d560SDavid Hildenbrand  *
1258fe69d560SDavid Hildenbrand  * Can only fail if new_exe_file != NULL.
125990f31d0eSKonstantin Khlebnikov  */
1260fe69d560SDavid Hildenbrand int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
126138646013SJiri Slaby {
12626e399cd1SDavidlohr Bueso 	struct file *old_exe_file;
12636e399cd1SDavidlohr Bueso 
12646e399cd1SDavidlohr Bueso 	/*
12656e399cd1SDavidlohr Bueso 	 * It is safe to dereference the exe_file without RCU as
12666e399cd1SDavidlohr Bueso 	 * this function is only called if nobody else can access
12676e399cd1SDavidlohr Bueso 	 * this mm -- see comment above for justification.
12686e399cd1SDavidlohr Bueso 	 */
12696e399cd1SDavidlohr Bueso 	old_exe_file = rcu_dereference_raw(mm->exe_file);
127090f31d0eSKonstantin Khlebnikov 
1271fe69d560SDavid Hildenbrand 	if (new_exe_file) {
1272fe69d560SDavid Hildenbrand 		/*
1273fe69d560SDavid Hildenbrand 		 * We expect the caller (i.e., sys_execve) to have already
1274fe69d560SDavid Hildenbrand 		 * denied write access, so this is unlikely to fail.
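		 * (Editorial note: this deny_write_access() is paired with
		 * the allow_write_access(old_exe_file) further down, which
		 * lifts the write block from the file being replaced.)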
1275fe69d560SDavid Hildenbrand */ 1276fe69d560SDavid Hildenbrand if (unlikely(deny_write_access(new_exe_file))) 1277fe69d560SDavid Hildenbrand return -EACCES; 127838646013SJiri Slaby get_file(new_exe_file); 1279fe69d560SDavid Hildenbrand } 128090f31d0eSKonstantin Khlebnikov rcu_assign_pointer(mm->exe_file, new_exe_file); 1281fe69d560SDavid Hildenbrand if (old_exe_file) { 1282fe69d560SDavid Hildenbrand allow_write_access(old_exe_file); 128390f31d0eSKonstantin Khlebnikov fput(old_exe_file); 128438646013SJiri Slaby } 1285fe69d560SDavid Hildenbrand return 0; 1286fe69d560SDavid Hildenbrand } 128738646013SJiri Slaby 128890f31d0eSKonstantin Khlebnikov /** 128935d7bdc8SDavid Hildenbrand * replace_mm_exe_file - replace a reference to the mm's executable file 129035d7bdc8SDavid Hildenbrand * 129135d7bdc8SDavid Hildenbrand * This changes mm's executable file (shown as symlink /proc/[pid]/exe), 129235d7bdc8SDavid Hildenbrand * dealing with concurrent invocation and without grabbing the mmap lock in 129335d7bdc8SDavid Hildenbrand * write mode. 129435d7bdc8SDavid Hildenbrand * 129535d7bdc8SDavid Hildenbrand * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE). 129635d7bdc8SDavid Hildenbrand */ 129735d7bdc8SDavid Hildenbrand int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) 129835d7bdc8SDavid Hildenbrand { 129935d7bdc8SDavid Hildenbrand struct vm_area_struct *vma; 130035d7bdc8SDavid Hildenbrand struct file *old_exe_file; 130135d7bdc8SDavid Hildenbrand int ret = 0; 130235d7bdc8SDavid Hildenbrand 130335d7bdc8SDavid Hildenbrand /* Forbid mm->exe_file change if old file still mapped. */ 130435d7bdc8SDavid Hildenbrand old_exe_file = get_mm_exe_file(mm); 130535d7bdc8SDavid Hildenbrand if (old_exe_file) { 1306fa5e5876SMatthew Wilcox (Oracle) VMA_ITERATOR(vmi, mm, 0); 130735d7bdc8SDavid Hildenbrand mmap_read_lock(mm); 1308fa5e5876SMatthew Wilcox (Oracle) for_each_vma(vmi, vma) { 130935d7bdc8SDavid Hildenbrand if (!vma->vm_file) 131035d7bdc8SDavid Hildenbrand continue; 131135d7bdc8SDavid Hildenbrand if (path_equal(&vma->vm_file->f_path, 1312fa5e5876SMatthew Wilcox (Oracle) &old_exe_file->f_path)) { 131335d7bdc8SDavid Hildenbrand ret = -EBUSY; 1314fa5e5876SMatthew Wilcox (Oracle) break; 1315fa5e5876SMatthew Wilcox (Oracle) } 131635d7bdc8SDavid Hildenbrand } 131735d7bdc8SDavid Hildenbrand mmap_read_unlock(mm); 131835d7bdc8SDavid Hildenbrand fput(old_exe_file); 131935d7bdc8SDavid Hildenbrand if (ret) 132035d7bdc8SDavid Hildenbrand return ret; 132135d7bdc8SDavid Hildenbrand } 132235d7bdc8SDavid Hildenbrand 132335d7bdc8SDavid Hildenbrand /* set the new file, lockless */ 1324fe69d560SDavid Hildenbrand ret = deny_write_access(new_exe_file); 1325fe69d560SDavid Hildenbrand if (ret) 1326fe69d560SDavid Hildenbrand return -EACCES; 132735d7bdc8SDavid Hildenbrand get_file(new_exe_file); 1328fe69d560SDavid Hildenbrand 132935d7bdc8SDavid Hildenbrand old_exe_file = xchg(&mm->exe_file, new_exe_file); 1330fe69d560SDavid Hildenbrand if (old_exe_file) { 1331fe69d560SDavid Hildenbrand /* 1332fe69d560SDavid Hildenbrand * Don't race with dup_mmap() getting the file and disallowing 1333fe69d560SDavid Hildenbrand * write access while someone might open the file writable. 
1334fe69d560SDavid Hildenbrand */ 1335fe69d560SDavid Hildenbrand mmap_read_lock(mm); 1336fe69d560SDavid Hildenbrand allow_write_access(old_exe_file); 133735d7bdc8SDavid Hildenbrand fput(old_exe_file); 1338fe69d560SDavid Hildenbrand mmap_read_unlock(mm); 1339fe69d560SDavid Hildenbrand } 134035d7bdc8SDavid Hildenbrand return 0; 134135d7bdc8SDavid Hildenbrand } 134238646013SJiri Slaby 134390f31d0eSKonstantin Khlebnikov /** 134490f31d0eSKonstantin Khlebnikov * get_mm_exe_file - acquire a reference to the mm's executable file 134590f31d0eSKonstantin Khlebnikov * 134690f31d0eSKonstantin Khlebnikov * Returns %NULL if mm has no associated executable file. 134790f31d0eSKonstantin Khlebnikov * User must release file via fput(). 134890f31d0eSKonstantin Khlebnikov */ 134938646013SJiri Slaby struct file *get_mm_exe_file(struct mm_struct *mm) 135038646013SJiri Slaby { 135138646013SJiri Slaby struct file *exe_file; 135238646013SJiri Slaby 135390f31d0eSKonstantin Khlebnikov rcu_read_lock(); 135490f31d0eSKonstantin Khlebnikov exe_file = rcu_dereference(mm->exe_file); 135590f31d0eSKonstantin Khlebnikov if (exe_file && !get_file_rcu(exe_file)) 135690f31d0eSKonstantin Khlebnikov exe_file = NULL; 135790f31d0eSKonstantin Khlebnikov rcu_read_unlock(); 135838646013SJiri Slaby return exe_file; 135938646013SJiri Slaby } 136038646013SJiri Slaby 13611da177e4SLinus Torvalds /** 1362cd81a917SMateusz Guzik * get_task_exe_file - acquire a reference to the task's executable file 1363cd81a917SMateusz Guzik * 1364cd81a917SMateusz Guzik * Returns %NULL if task's mm (if any) has no associated executable file or 1365cd81a917SMateusz Guzik * this is a kernel thread with borrowed mm (see the comment above get_task_mm). 1366cd81a917SMateusz Guzik * User must release file via fput(). 1367cd81a917SMateusz Guzik */ 1368cd81a917SMateusz Guzik struct file *get_task_exe_file(struct task_struct *task) 1369cd81a917SMateusz Guzik { 1370cd81a917SMateusz Guzik struct file *exe_file = NULL; 1371cd81a917SMateusz Guzik struct mm_struct *mm; 1372cd81a917SMateusz Guzik 1373cd81a917SMateusz Guzik task_lock(task); 1374cd81a917SMateusz Guzik mm = task->mm; 1375cd81a917SMateusz Guzik if (mm) { 1376cd81a917SMateusz Guzik if (!(task->flags & PF_KTHREAD)) 1377cd81a917SMateusz Guzik exe_file = get_mm_exe_file(mm); 1378cd81a917SMateusz Guzik } 1379cd81a917SMateusz Guzik task_unlock(task); 1380cd81a917SMateusz Guzik return exe_file; 1381cd81a917SMateusz Guzik } 1382cd81a917SMateusz Guzik 1383cd81a917SMateusz Guzik /** 13841da177e4SLinus Torvalds * get_task_mm - acquire a reference to the task's mm 13851da177e4SLinus Torvalds * 1386246bb0b1SOleg Nesterov * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning 13871da177e4SLinus Torvalds * this kernel workthread has transiently adopted a user mm with use_mm, 13881da177e4SLinus Torvalds * to do its AIO) is not set and if so returns a reference to it, after 13891da177e4SLinus Torvalds * bumping up the use count. User must release the mm via mmput() 13901da177e4SLinus Torvalds * after use. Typically used by /proc and ptrace. 
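 *
 * (Editorial sketch, illustrative only:
 *
 *	mm = get_task_mm(task);
 *	if (!mm)
 *		return -ESRCH;
 *	...
 *	mmput(mm);
 *
 * The -ESRCH is this sketch's choice of error, not mandated by the API.)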
13911da177e4SLinus Torvalds */ 13921da177e4SLinus Torvalds struct mm_struct *get_task_mm(struct task_struct *task) 13931da177e4SLinus Torvalds { 13941da177e4SLinus Torvalds struct mm_struct *mm; 13951da177e4SLinus Torvalds 13961da177e4SLinus Torvalds task_lock(task); 13971da177e4SLinus Torvalds mm = task->mm; 13981da177e4SLinus Torvalds if (mm) { 1399246bb0b1SOleg Nesterov if (task->flags & PF_KTHREAD) 14001da177e4SLinus Torvalds mm = NULL; 14011da177e4SLinus Torvalds else 14023fce371bSVegard Nossum mmget(mm); 14031da177e4SLinus Torvalds } 14041da177e4SLinus Torvalds task_unlock(task); 14051da177e4SLinus Torvalds return mm; 14061da177e4SLinus Torvalds } 14071da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(get_task_mm); 14081da177e4SLinus Torvalds 14098cdb878dSChristopher Yeoh struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) 14108cdb878dSChristopher Yeoh { 14118cdb878dSChristopher Yeoh struct mm_struct *mm; 14128cdb878dSChristopher Yeoh int err; 14138cdb878dSChristopher Yeoh 1414f7cfd871SEric W. Biederman err = down_read_killable(&task->signal->exec_update_lock); 14158cdb878dSChristopher Yeoh if (err) 14168cdb878dSChristopher Yeoh return ERR_PTR(err); 14178cdb878dSChristopher Yeoh 14188cdb878dSChristopher Yeoh mm = get_task_mm(task); 14198cdb878dSChristopher Yeoh if (mm && mm != current->mm && 14208cdb878dSChristopher Yeoh !ptrace_may_access(task, mode)) { 14218cdb878dSChristopher Yeoh mmput(mm); 14228cdb878dSChristopher Yeoh mm = ERR_PTR(-EACCES); 14238cdb878dSChristopher Yeoh } 1424f7cfd871SEric W. Biederman up_read(&task->signal->exec_update_lock); 14258cdb878dSChristopher Yeoh 14268cdb878dSChristopher Yeoh return mm; 14278cdb878dSChristopher Yeoh } 14288cdb878dSChristopher Yeoh 142957b59c4aSOleg Nesterov static void complete_vfork_done(struct task_struct *tsk) 1430c415c3b4SOleg Nesterov { 1431d68b46feSOleg Nesterov struct completion *vfork; 1432c415c3b4SOleg Nesterov 1433d68b46feSOleg Nesterov task_lock(tsk); 1434d68b46feSOleg Nesterov vfork = tsk->vfork_done; 1435d68b46feSOleg Nesterov if (likely(vfork)) { 1436c415c3b4SOleg Nesterov tsk->vfork_done = NULL; 1437d68b46feSOleg Nesterov complete(vfork); 1438d68b46feSOleg Nesterov } 1439d68b46feSOleg Nesterov task_unlock(tsk); 1440d68b46feSOleg Nesterov } 1441d68b46feSOleg Nesterov 1442d68b46feSOleg Nesterov static int wait_for_vfork_done(struct task_struct *child, 1443d68b46feSOleg Nesterov struct completion *vfork) 1444d68b46feSOleg Nesterov { 1445f5d39b02SPeter Zijlstra unsigned int state = TASK_UNINTERRUPTIBLE|TASK_KILLABLE|TASK_FREEZABLE; 1446d68b46feSOleg Nesterov int killed; 1447d68b46feSOleg Nesterov 144876f969e8SRoman Gushchin cgroup_enter_frozen(); 1449f5d39b02SPeter Zijlstra killed = wait_for_completion_state(vfork, state); 145076f969e8SRoman Gushchin cgroup_leave_frozen(false); 1451d68b46feSOleg Nesterov 1452d68b46feSOleg Nesterov if (killed) { 1453d68b46feSOleg Nesterov task_lock(child); 1454d68b46feSOleg Nesterov child->vfork_done = NULL; 1455d68b46feSOleg Nesterov task_unlock(child); 1456d68b46feSOleg Nesterov } 1457d68b46feSOleg Nesterov 1458d68b46feSOleg Nesterov put_task_struct(child); 1459d68b46feSOleg Nesterov return killed; 1460c415c3b4SOleg Nesterov } 1461c415c3b4SOleg Nesterov 14621da177e4SLinus Torvalds /* Please note the differences between mmput and mm_release. 14631da177e4SLinus Torvalds * mmput is called whenever we stop holding onto a mm_struct, 14641da177e4SLinus Torvalds * error success whatever. 
14651da177e4SLinus Torvalds * 14661da177e4SLinus Torvalds * mm_release is called after a mm_struct has been removed 14671da177e4SLinus Torvalds * from the current process. 14681da177e4SLinus Torvalds * 14691da177e4SLinus Torvalds * This difference is important for error handling, when we 14701da177e4SLinus Torvalds * only half set up a mm_struct for a new process and need to restore 14711da177e4SLinus Torvalds * the old one. Because we mmput the new mm_struct before 14721da177e4SLinus Torvalds * restoring the old one. . . 14731da177e4SLinus Torvalds * Eric Biederman 10 January 1998 14741da177e4SLinus Torvalds */ 14754610ba7aSThomas Gleixner static void mm_release(struct task_struct *tsk, struct mm_struct *mm) 14761da177e4SLinus Torvalds { 14770326f5a9SSrikar Dronamraju uprobe_free_utask(tsk); 14780326f5a9SSrikar Dronamraju 14791da177e4SLinus Torvalds /* Get rid of any cached register state */ 14801da177e4SLinus Torvalds deactivate_mm(tsk, mm); 14811da177e4SLinus Torvalds 1482fec1d011SRoland McGrath /* 1483735f2770SMichal Hocko * Signal userspace if we're not exiting with a core dump 1484735f2770SMichal Hocko * because we want to leave the value intact for debugging 1485735f2770SMichal Hocko * purposes. 1486fec1d011SRoland McGrath */ 14879c8a8228SEric Dumazet if (tsk->clear_child_tid) { 148892307383SEric W. Biederman if (atomic_read(&mm->mm_users) > 1) { 14891da177e4SLinus Torvalds /* 14901da177e4SLinus Torvalds * We don't check the error code - if userspace has 14911da177e4SLinus Torvalds * not set up a proper pointer then tough luck. 14921da177e4SLinus Torvalds */ 14939c8a8228SEric Dumazet put_user(0, tsk->clear_child_tid); 14942de0db99SDominik Brodowski do_futex(tsk->clear_child_tid, FUTEX_WAKE, 14952de0db99SDominik Brodowski 1, NULL, NULL, 0, 0); 14969c8a8228SEric Dumazet } 14979c8a8228SEric Dumazet tsk->clear_child_tid = NULL; 14981da177e4SLinus Torvalds } 1499f7505d64SKonstantin Khlebnikov 1500f7505d64SKonstantin Khlebnikov /* 1501f7505d64SKonstantin Khlebnikov * All done, finally we can wake up parent and return this mm to him. 1502f7505d64SKonstantin Khlebnikov * Also kthread_stop() uses this completion for synchronization. 1503f7505d64SKonstantin Khlebnikov */ 1504f7505d64SKonstantin Khlebnikov if (tsk->vfork_done) 1505f7505d64SKonstantin Khlebnikov complete_vfork_done(tsk); 15061da177e4SLinus Torvalds } 15071da177e4SLinus Torvalds 15084610ba7aSThomas Gleixner void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) 15094610ba7aSThomas Gleixner { 1510150d7158SThomas Gleixner futex_exit_release(tsk); 15114610ba7aSThomas Gleixner mm_release(tsk, mm); 15124610ba7aSThomas Gleixner } 15134610ba7aSThomas Gleixner 15144610ba7aSThomas Gleixner void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) 15154610ba7aSThomas Gleixner { 1516150d7158SThomas Gleixner futex_exec_release(tsk); 15174610ba7aSThomas Gleixner mm_release(tsk, mm); 15184610ba7aSThomas Gleixner } 15194610ba7aSThomas Gleixner 152013585fa0SNadav Amit /** 152113585fa0SNadav Amit * dup_mm() - duplicates an existing mm structure 152213585fa0SNadav Amit * @tsk: the task_struct with which the new mm will be associated. 152313585fa0SNadav Amit * @oldmm: the mm to duplicate. 152413585fa0SNadav Amit * 152513585fa0SNadav Amit * Allocates a new mm structure and duplicates the provided @oldmm structure 152613585fa0SNadav Amit * content into it. 152713585fa0SNadav Amit * 152813585fa0SNadav Amit * Return: the duplicated mm or NULL on failure. 
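 *
 * (Editorial note: the caller in this file is copy_mm(), which does
 *
 *	mm = dup_mm(tsk, current->mm);
 *	if (!mm)
 *		return -ENOMEM;
 *
 * on the no-CLONE_VM path.)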
1529a0a7ec30SJANAK DESAI  */
153013585fa0SNadav Amit static struct mm_struct *dup_mm(struct task_struct *tsk,
153113585fa0SNadav Amit 				struct mm_struct *oldmm)
1532a0a7ec30SJANAK DESAI {
153313585fa0SNadav Amit 	struct mm_struct *mm;
1534a0a7ec30SJANAK DESAI 	int err;
1535a0a7ec30SJANAK DESAI 
1536a0a7ec30SJANAK DESAI 	mm = allocate_mm();
1537a0a7ec30SJANAK DESAI 	if (!mm)
1538a0a7ec30SJANAK DESAI 		goto fail_nomem;
1539a0a7ec30SJANAK DESAI 
1540a0a7ec30SJANAK DESAI 	memcpy(mm, oldmm, sizeof(*mm));
1541a0a7ec30SJANAK DESAI 
1542bfedb589SEric W. Biederman 	if (!mm_init(mm, tsk, mm->user_ns))
1543a0a7ec30SJANAK DESAI 		goto fail_nomem;
1544a0a7ec30SJANAK DESAI 
1545a0a7ec30SJANAK DESAI 	err = dup_mmap(mm, oldmm);
1546a0a7ec30SJANAK DESAI 	if (err)
1547a0a7ec30SJANAK DESAI 		goto free_pt;
1548a0a7ec30SJANAK DESAI 
1549a0a7ec30SJANAK DESAI 	mm->hiwater_rss = get_mm_rss(mm);
1550a0a7ec30SJANAK DESAI 	mm->hiwater_vm = mm->total_vm;
1551a0a7ec30SJANAK DESAI 
1552801460d0SHiroshi Shimamoto 	if (mm->binfmt && !try_module_get(mm->binfmt->module))
1553801460d0SHiroshi Shimamoto 		goto free_pt;
1554801460d0SHiroshi Shimamoto 
1555a0a7ec30SJANAK DESAI 	return mm;
1556a0a7ec30SJANAK DESAI 
1557a0a7ec30SJANAK DESAI free_pt:
1558801460d0SHiroshi Shimamoto 	/* don't put binfmt in mmput, we haven't got module yet */
1559801460d0SHiroshi Shimamoto 	mm->binfmt = NULL;
1560c3f3ce04SAndrea Arcangeli 	mm_init_owner(mm, NULL);
1561a0a7ec30SJANAK DESAI 	mmput(mm);
1562a0a7ec30SJANAK DESAI 
1563a0a7ec30SJANAK DESAI fail_nomem:
1564a0a7ec30SJANAK DESAI 	return NULL;
1565a0a7ec30SJANAK DESAI }
1566a0a7ec30SJANAK DESAI 
15671da177e4SLinus Torvalds static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
15681da177e4SLinus Torvalds {
15691da177e4SLinus Torvalds 	struct mm_struct *mm, *oldmm;
15701da177e4SLinus Torvalds 
15711da177e4SLinus Torvalds 	tsk->min_flt = tsk->maj_flt = 0;
15721da177e4SLinus Torvalds 	tsk->nvcsw = tsk->nivcsw = 0;
157317406b82SMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
157417406b82SMandeep Singh Baines 	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
1575a2e51445SDmitry Vyukov 	tsk->last_switch_time = 0;
157617406b82SMandeep Singh Baines #endif
15771da177e4SLinus Torvalds 
15781da177e4SLinus Torvalds 	tsk->mm = NULL;
15791da177e4SLinus Torvalds 	tsk->active_mm = NULL;
15801da177e4SLinus Torvalds 
15811da177e4SLinus Torvalds 	/*
15821da177e4SLinus Torvalds 	 * Are we cloning a kernel thread?
15831da177e4SLinus Torvalds 	 *
15841da177e4SLinus Torvalds 	 * We need to steal an active VM for that.
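	 *
	 * (Editorial note: with CLONE_VM the child shares the parent's mm
	 * via mmget() below, the thread/vfork case; otherwise dup_mm()
	 * builds a private copy, the plain fork() case.)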
15851da177e4SLinus Torvalds 	 */
15861da177e4SLinus Torvalds 	oldmm = current->mm;
15871da177e4SLinus Torvalds 	if (!oldmm)
15881da177e4SLinus Torvalds 		return 0;
15891da177e4SLinus Torvalds 
15901da177e4SLinus Torvalds 	if (clone_flags & CLONE_VM) {
15913fce371bSVegard Nossum 		mmget(oldmm);
15921da177e4SLinus Torvalds 		mm = oldmm;
1593a6895399SRolf Eike Beer 	} else {
159413585fa0SNadav Amit 		mm = dup_mm(tsk, current->mm);
15951da177e4SLinus Torvalds 		if (!mm)
1596a6895399SRolf Eike Beer 			return -ENOMEM;
1597a6895399SRolf Eike Beer 	}
15981da177e4SLinus Torvalds 
15991da177e4SLinus Torvalds 	tsk->mm = mm;
16001da177e4SLinus Torvalds 	tsk->active_mm = mm;
1601af7f588dSMathieu Desnoyers 	sched_mm_cid_fork(tsk);
16021da177e4SLinus Torvalds 	return 0;
16031da177e4SLinus Torvalds }
16041da177e4SLinus Torvalds 
1605a39bc516SAlexey Dobriyan static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
16061da177e4SLinus Torvalds {
1607498052bbSAl Viro 	struct fs_struct *fs = current->fs;
16081da177e4SLinus Torvalds 	if (clone_flags & CLONE_FS) {
1609498052bbSAl Viro 		/* tsk->fs is already what we want */
16102a4419b5SNick Piggin 		spin_lock(&fs->lock);
1611498052bbSAl Viro 		if (fs->in_exec) {
16122a4419b5SNick Piggin 			spin_unlock(&fs->lock);
1613498052bbSAl Viro 			return -EAGAIN;
1614498052bbSAl Viro 		}
1615498052bbSAl Viro 		fs->users++;
16162a4419b5SNick Piggin 		spin_unlock(&fs->lock);
16171da177e4SLinus Torvalds 		return 0;
16181da177e4SLinus Torvalds 	}
1619498052bbSAl Viro 	tsk->fs = copy_fs_struct(fs);
16201da177e4SLinus Torvalds 	if (!tsk->fs)
16211da177e4SLinus Torvalds 		return -ENOMEM;
16221da177e4SLinus Torvalds 	return 0;
16231da177e4SLinus Torvalds }
16241da177e4SLinus Torvalds 
1625a016f338SJANAK DESAI static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
1626a016f338SJANAK DESAI {
1627a016f338SJANAK DESAI 	struct files_struct *oldf, *newf;
1628a016f338SJANAK DESAI 	int error = 0;
1629a016f338SJANAK DESAI 
1630a016f338SJANAK DESAI 	/*
1631a016f338SJANAK DESAI 	 * A background process may not have any files ...
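	 *
	 * (Editorial sketch: CLONE_FILES makes parent and child share one
	 * files_struct, so an fd opened in either is visible to both, e.g.
	 * from userspace:
	 *
	 *	clone(child_fn, stack_top, CLONE_FILES | SIGCHLD, NULL);
	 *
	 * Without CLONE_FILES, dup_fd() below gives the child a private
	 * copy of the descriptor table.)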
1632a016f338SJANAK DESAI 	 */
1633a016f338SJANAK DESAI 	oldf = current->files;
1634a016f338SJANAK DESAI 	if (!oldf)
1635a016f338SJANAK DESAI 		goto out;
1636a016f338SJANAK DESAI 
1637a016f338SJANAK DESAI 	if (clone_flags & CLONE_FILES) {
1638a016f338SJANAK DESAI 		atomic_inc(&oldf->count);
1639a016f338SJANAK DESAI 		goto out;
1640a016f338SJANAK DESAI 	}
1641a016f338SJANAK DESAI 
164260997c3dSChristian Brauner 	newf = dup_fd(oldf, NR_OPEN_MAX, &error);
1643a016f338SJANAK DESAI 	if (!newf)
1644a016f338SJANAK DESAI 		goto out;
1645a016f338SJANAK DESAI 
1646a016f338SJANAK DESAI 	tsk->files = newf;
1647a016f338SJANAK DESAI 	error = 0;
1648a016f338SJANAK DESAI out:
1649a016f338SJANAK DESAI 	return error;
1650a016f338SJANAK DESAI }
1651a016f338SJANAK DESAI 
1652a39bc516SAlexey Dobriyan static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
16531da177e4SLinus Torvalds {
16541da177e4SLinus Torvalds 	struct sighand_struct *sig;
16551da177e4SLinus Torvalds 
165660348802SZhaolei 	if (clone_flags & CLONE_SIGHAND) {
1657d036bda7SElena Reshetova 		refcount_inc(&current->sighand->count);
16581da177e4SLinus Torvalds 		return 0;
16591da177e4SLinus Torvalds 	}
16601da177e4SLinus Torvalds 	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
16610c282b06SMadhuparna Bhowmik 	RCU_INIT_POINTER(tsk->sighand, sig);
16621da177e4SLinus Torvalds 	if (!sig)
16631da177e4SLinus Torvalds 		return -ENOMEM;
16649d7fb042SPeter Zijlstra 
1665d036bda7SElena Reshetova 	refcount_set(&sig->count, 1);
166606e62a46SJann Horn 	spin_lock_irq(&current->sighand->siglock);
16671da177e4SLinus Torvalds 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
166806e62a46SJann Horn 	spin_unlock_irq(&current->sighand->siglock);
1669b612e5dfSChristian Brauner 
1670b612e5dfSChristian Brauner 	/* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
1671b612e5dfSChristian Brauner 	if (clone_flags & CLONE_CLEAR_SIGHAND)
1672b612e5dfSChristian Brauner 		flush_signal_handlers(tsk, 0);
1673b612e5dfSChristian Brauner 
16741da177e4SLinus Torvalds 	return 0;
16751da177e4SLinus Torvalds }
16761da177e4SLinus Torvalds 
1677a7e5328aSOleg Nesterov void __cleanup_sighand(struct sighand_struct *sighand)
1678c81addc9SOleg Nesterov {
1679d036bda7SElena Reshetova 	if (refcount_dec_and_test(&sighand->count)) {
1680d80e731eSOleg Nesterov 		signalfd_cleanup(sighand);
1681392809b2SOleg Nesterov 		/*
16825f0d5a3aSPaul E. McKenney 		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
1683392809b2SOleg Nesterov 		 * without an RCU grace period, see __lock_task_sighand().
1684392809b2SOleg Nesterov 		 */
1685c81addc9SOleg Nesterov 		kmem_cache_free(sighand_cachep, sighand);
1686c81addc9SOleg Nesterov 	}
1687d80e731eSOleg Nesterov }
1688c81addc9SOleg Nesterov 
1689f06febc9SFrank Mayhar /*
1690f06febc9SFrank Mayhar  * Initialize POSIX timer handling for a thread group.
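 *
 * (Editorial sketch: the expiry is seeded from RLIMIT_CPU below, so a
 * parent that does
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };
 *	setrlimit(RLIMIT_CPU, &rl);
 *
 * arms a ten second process-wide CPU timer in each child it then forks.)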
1691f06febc9SFrank Mayhar  */
1692f06febc9SFrank Mayhar static void posix_cpu_timers_init_group(struct signal_struct *sig)
1693f06febc9SFrank Mayhar {
16942b69942fSThomas Gleixner 	struct posix_cputimers *pct = &sig->posix_cputimers;
169578d7d407SJiri Slaby 	unsigned long cpu_limit;
169678d7d407SJiri Slaby 
1697316c1608SJason Low 	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
16983a245c0fSThomas Gleixner 	posix_cputimers_group_init(pct, cpu_limit);
16996279a751SOleg Nesterov }
17006279a751SOleg Nesterov 
1701a39bc516SAlexey Dobriyan static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
17021da177e4SLinus Torvalds {
17031da177e4SLinus Torvalds 	struct signal_struct *sig;
17041da177e4SLinus Torvalds 
17054ab6c083SOleg Nesterov 	if (clone_flags & CLONE_THREAD)
1706490dea45SPeter Zijlstra 		return 0;
17076279a751SOleg Nesterov 
1708a56704efSVeaceslav Falico 	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
17091da177e4SLinus Torvalds 	tsk->signal = sig;
17101da177e4SLinus Torvalds 	if (!sig)
17111da177e4SLinus Torvalds 		return -ENOMEM;
17121da177e4SLinus Torvalds 
1713b3ac022cSOleg Nesterov 	sig->nr_threads = 1;
1714d80f7d7bSEric W. Biederman 	sig->quick_threads = 1;
17151da177e4SLinus Torvalds 	atomic_set(&sig->live, 1);
171660d4de3fSElena Reshetova 	refcount_set(&sig->sigcnt, 1);
17170c740d0aSOleg Nesterov 
17180c740d0aSOleg Nesterov 	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
17190c740d0aSOleg Nesterov 	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
17200c740d0aSOleg Nesterov 	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
17210c740d0aSOleg Nesterov 
17221da177e4SLinus Torvalds 	init_waitqueue_head(&sig->wait_chldexit);
1723db51aeccSOleg Nesterov 	sig->curr_target = tsk;
17241da177e4SLinus Torvalds 	init_sigpending(&sig->shared_pending);
1725c3ad2c3bSEric W. Biederman 	INIT_HLIST_HEAD(&sig->multiprocess);
1726e78c3496SRik van Riel 	seqlock_init(&sig->stats_lock);
17279d7fb042SPeter Zijlstra 	prev_cputime_init(&sig->prev_cputime);
17281da177e4SLinus Torvalds 
1729baa73d9eSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS
1730b18b6a9cSNicolas Pitre 	INIT_LIST_HEAD(&sig->posix_timers);
1731c9cb2e3dSThomas Gleixner 	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
17321da177e4SLinus Torvalds 	sig->real_timer.function = it_real_fn;
1733baa73d9eSNicolas Pitre #endif
17341da177e4SLinus Torvalds 
17351da177e4SLinus Torvalds 	task_lock(current->group_leader);
17361da177e4SLinus Torvalds 	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
17371da177e4SLinus Torvalds 	task_unlock(current->group_leader);
17381da177e4SLinus Torvalds 
17396279a751SOleg Nesterov 	posix_cpu_timers_init_group(sig);
17406279a751SOleg Nesterov 
1741522ed776SMiloslav Trmac 	tty_audit_fork(sig);
17425091faa4SMike Galbraith 	sched_autogroup_fork(sig);
1743522ed776SMiloslav Trmac 
1744a63d83f4SDavid Rientjes 	sig->oom_score_adj = current->signal->oom_score_adj;
1745dabb16f6SMandeep Singh Baines 	sig->oom_score_adj_min = current->signal->oom_score_adj_min;
174628b83c51SKOSAKI Motohiro 
17479b1bf12dSKOSAKI Motohiro 	mutex_init(&sig->cred_guard_mutex);
1748f7cfd871SEric W. Biederman 	init_rwsem(&sig->exec_update_lock);
17499b1bf12dSKOSAKI Motohiro 
17501da177e4SLinus Torvalds 	return 0;
17511da177e4SLinus Torvalds }
17521da177e4SLinus Torvalds 
1753dbd95212SKees Cook static void copy_seccomp(struct task_struct *p)
1754dbd95212SKees Cook {
1755dbd95212SKees Cook #ifdef CONFIG_SECCOMP
1756dbd95212SKees Cook 	/*
1757dbd95212SKees Cook 	 * Must be called with sighand->lock held, which is common to
1758dbd95212SKees Cook 	 * all threads in the group. Holding cred_guard_mutex is not
1759dbd95212SKees Cook 	 * needed because this new task is not yet running and cannot
1760dbd95212SKees Cook 	 * be racing exec.
1761dbd95212SKees Cook 	 */
176269f6a34bSGuenter Roeck 	assert_spin_locked(&current->sighand->siglock);
1763dbd95212SKees Cook 
1764dbd95212SKees Cook 	/* Ref-count the new filter user, and assign it. */
1765dbd95212SKees Cook 	get_seccomp_filter(current);
1766dbd95212SKees Cook 	p->seccomp = current->seccomp;
1767dbd95212SKees Cook 
1768dbd95212SKees Cook 	/*
1769dbd95212SKees Cook 	 * Explicitly enable no_new_privs here in case it got set
1770dbd95212SKees Cook 	 * between the task_struct being duplicated and holding the
1771dbd95212SKees Cook 	 * sighand lock. The seccomp state and nnp must be in sync.
1772dbd95212SKees Cook 	 */
1773dbd95212SKees Cook 	if (task_no_new_privs(current))
1774dbd95212SKees Cook 		task_set_no_new_privs(p);
1775dbd95212SKees Cook 
1776dbd95212SKees Cook 	/*
1777dbd95212SKees Cook 	 * If the parent gained a seccomp mode after copying thread
1778dbd95212SKees Cook 	 * flags but before we held the sighand lock, we have
1779dbd95212SKees Cook 	 * to manually enable the seccomp thread flag here.
1780dbd95212SKees Cook 	 */
1781dbd95212SKees Cook 	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
178223d67a54SGabriel Krisman Bertazi 		set_task_syscall_work(p, SECCOMP);
1783dbd95212SKees Cook #endif
1784dbd95212SKees Cook }
1785dbd95212SKees Cook 
178617da2bd9SHeiko Carstens SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
17871da177e4SLinus Torvalds {
17881da177e4SLinus Torvalds 	current->clear_child_tid = tidptr;
17891da177e4SLinus Torvalds 
1790b488893aSPavel Emelyanov 	return task_pid_vnr(current);
17911da177e4SLinus Torvalds }
17921da177e4SLinus Torvalds 
1793a39bc516SAlexey Dobriyan static void rt_mutex_init_task(struct task_struct *p)
179423f78d4aSIngo Molnar {
17951d615482SThomas Gleixner 	raw_spin_lock_init(&p->pi_lock);
1796e29e175bSZilvinas Valinskas #ifdef CONFIG_RT_MUTEXES
1797a23ba907SDavidlohr Bueso 	p->pi_waiters = RB_ROOT_CACHED;
1798e96a7705SXunlei Pang 	p->pi_top_task = NULL;
179923f78d4aSIngo Molnar 	p->pi_blocked_on = NULL;
180023f78d4aSIngo Molnar #endif
180123f78d4aSIngo Molnar }
180223f78d4aSIngo Molnar 
18032c470475SEric W. Biederman static inline void init_task_pid_links(struct task_struct *task)
18042c470475SEric W. Biederman {
18052c470475SEric W. Biederman 	enum pid_type type;
18062c470475SEric W. Biederman 
180796e1e984SAlexander Guril 	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
18082c470475SEric W. Biederman 		INIT_HLIST_NODE(&task->pid_links[type]);
18092c470475SEric W. Biederman }
18102c470475SEric W. Biederman 
181181907739SOleg Nesterov static inline void
181281907739SOleg Nesterov init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
181381907739SOleg Nesterov {
18142c470475SEric W. Biederman 	if (type == PIDTYPE_PID)
18152c470475SEric W. Biederman 		task->thread_pid = pid;
18162c470475SEric W. Biederman 	else
18172c470475SEric W. Biederman 		task->signal->pids[type] = pid;
181881907739SOleg Nesterov }
181981907739SOleg Nesterov 
18206bfbaa51SIngo Molnar static inline void rcu_copy_process(struct task_struct *p)
18216bfbaa51SIngo Molnar {
18226bfbaa51SIngo Molnar #ifdef CONFIG_PREEMPT_RCU
18236bfbaa51SIngo Molnar 	p->rcu_read_lock_nesting = 0;
18246bfbaa51SIngo Molnar 	p->rcu_read_unlock_special.s = 0;
18256bfbaa51SIngo Molnar 	p->rcu_blocked_node = NULL;
18266bfbaa51SIngo Molnar 	INIT_LIST_HEAD(&p->rcu_node_entry);
18276bfbaa51SIngo Molnar #endif /* #ifdef CONFIG_PREEMPT_RCU */
18286bfbaa51SIngo Molnar #ifdef CONFIG_TASKS_RCU
18296bfbaa51SIngo Molnar 	p->rcu_tasks_holdout = false;
18306bfbaa51SIngo Molnar 	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
18316bfbaa51SIngo Molnar 	p->rcu_tasks_idle_cpu = -1;
18326bfbaa51SIngo Molnar #endif /* #ifdef CONFIG_TASKS_RCU */
1833d5f177d3SPaul E. McKenney #ifdef CONFIG_TASKS_TRACE_RCU
1834d5f177d3SPaul E. McKenney 	p->trc_reader_nesting = 0;
1835276c4104SPaul E. McKenney 	p->trc_reader_special.s = 0;
1836d5f177d3SPaul E. McKenney 	INIT_LIST_HEAD(&p->trc_holdout_list);
1837434c9eefSPaul E. McKenney 	INIT_LIST_HEAD(&p->trc_blkd_node);
1838d5f177d3SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
18396bfbaa51SIngo Molnar }
18406bfbaa51SIngo Molnar 
18413695eae5SChristian Brauner struct pid *pidfd_pid(const struct file *file)
18423695eae5SChristian Brauner {
18433695eae5SChristian Brauner 	if (file->f_op == &pidfd_fops)
18443695eae5SChristian Brauner 		return file->private_data;
18453695eae5SChristian Brauner 
18463695eae5SChristian Brauner 	return ERR_PTR(-EBADF);
18473695eae5SChristian Brauner }
18483695eae5SChristian Brauner 
1849b3e58382SChristian Brauner static int pidfd_release(struct inode *inode, struct file *file)
1850b3e58382SChristian Brauner {
1851b3e58382SChristian Brauner 	struct pid *pid = file->private_data;
1852b3e58382SChristian Brauner 
1853b3e58382SChristian Brauner 	file->private_data = NULL;
1854b3e58382SChristian Brauner 	put_pid(pid);
1855b3e58382SChristian Brauner 	return 0;
1856b3e58382SChristian Brauner }
1857b3e58382SChristian Brauner 
1858b3e58382SChristian Brauner #ifdef CONFIG_PROC_FS
185915d42eb2SChristian Kellner /**
186015d42eb2SChristian Kellner  * pidfd_show_fdinfo - print information about a pidfd
186115d42eb2SChristian Kellner  * @m: proc fdinfo file
186215d42eb2SChristian Kellner  * @f: file referencing a pidfd
186315d42eb2SChristian Kellner  *
186415d42eb2SChristian Kellner  * Pid:
186515d42eb2SChristian Kellner  * This function will print the pid that a given pidfd refers to in the
186615d42eb2SChristian Kellner  * pid namespace of the procfs instance.
186715d42eb2SChristian Kellner  * If the pid namespace of the process is not a descendant of the pid
186815d42eb2SChristian Kellner  * namespace of the procfs instance 0 will be shown as its pid. This is
186915d42eb2SChristian Kellner  * similar to calling getppid() on a process whose parent is outside of
187015d42eb2SChristian Kellner  * its pid namespace.
187115d42eb2SChristian Kellner  *
187215d42eb2SChristian Kellner  * NSpid:
187315d42eb2SChristian Kellner  * If pid namespaces are supported then this function will also print
187415d42eb2SChristian Kellner  * the pid a given pidfd refers to for all descendant pid namespaces
187515d42eb2SChristian Kellner  * starting from the current pid namespace of the instance, i.e. the
187615d42eb2SChristian Kellner  * Pid field and the first entry in the NSpid field will be identical.
187715d42eb2SChristian Kellner  * If the pid namespace of the process is not a descendant of the pid
187815d42eb2SChristian Kellner  * namespace of the procfs instance 0 will be shown as its first NSpid
187915d42eb2SChristian Kellner  * entry and no others will be shown.
188015d42eb2SChristian Kellner  * Note that this differs from the Pid and NSpid fields in
188115d42eb2SChristian Kellner  * /proc/<pid>/status where Pid and NSpid are always shown relative to
188215d42eb2SChristian Kellner  * the pid namespace of the procfs instance. The difference becomes
188315d42eb2SChristian Kellner  * obvious when sending around a pidfd between pid namespaces from a
1884a8ca6b13SXiaofeng Cao  * different branch of the tree, i.e. where no ancestral relation is
188515d42eb2SChristian Kellner  * present between the pid namespaces:
188615d42eb2SChristian Kellner  * - create two new pid namespaces ns1 and ns2 in the initial pid
188715d42eb2SChristian Kellner  *   namespace (also take care to create new mount namespaces in the
188815d42eb2SChristian Kellner  *   new pid namespace and mount procfs)
188915d42eb2SChristian Kellner  * - create a process with a pidfd in ns1
189015d42eb2SChristian Kellner  * - send pidfd from ns1 to ns2
189115d42eb2SChristian Kellner  * - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid
189215d42eb2SChristian Kellner  *   have exactly one entry, which is 0
189315d42eb2SChristian Kellner  */
1894b3e58382SChristian Brauner static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
1895b3e58382SChristian Brauner {
1896b3e58382SChristian Brauner 	struct pid *pid = f->private_data;
18973d6d8da4SChristian Brauner 	struct pid_namespace *ns;
18983d6d8da4SChristian Brauner 	pid_t nr = -1;
1899b3e58382SChristian Brauner 
19003d6d8da4SChristian Brauner 	if (likely(pid_has_task(pid, PIDTYPE_PID))) {
19019d78edeaSAlexey Gladkov 		ns = proc_pid_ns(file_inode(m->file)->i_sb);
19023d6d8da4SChristian Brauner 		nr = pid_nr_ns(pid, ns);
19033d6d8da4SChristian Brauner 	}
19043d6d8da4SChristian Brauner 
19053d6d8da4SChristian Brauner 	seq_put_decimal_ll(m, "Pid:\t", nr);
190615d42eb2SChristian Kellner 
190715d42eb2SChristian Kellner #ifdef CONFIG_PID_NS
19083d6d8da4SChristian Brauner 	seq_put_decimal_ll(m, "\nNSpid:\t", nr);
19093d6d8da4SChristian Brauner 	if (nr > 0) {
191015d42eb2SChristian Kellner 		int i;
191115d42eb2SChristian Kellner 
191215d42eb2SChristian Kellner 		/* If nr is non-zero it means that 'pid' is valid and that
191315d42eb2SChristian Kellner 		 * ns, i.e. the pid namespace associated with the procfs
191415d42eb2SChristian Kellner 		 * instance, is in the pid namespace hierarchy of pid.
191515d42eb2SChristian Kellner 		 * Start at one below the already printed level.
191615d42eb2SChristian Kellner 		 */
191715d42eb2SChristian Kellner 		for (i = ns->level + 1; i <= pid->level; i++)
19183d6d8da4SChristian Brauner 			seq_put_decimal_ll(m, "\t", pid->numbers[i].nr);
191915d42eb2SChristian Kellner 	}
192015d42eb2SChristian Kellner #endif
1921b3e58382SChristian Brauner 	seq_putc(m, '\n');
1922b3e58382SChristian Brauner }
1923b3e58382SChristian Brauner #endif
1924b3e58382SChristian Brauner 
1925b53b0b9dSJoel Fernandes (Google) /*
1926b53b0b9dSJoel Fernandes (Google)  * Poll support for process exit notification.
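 *
 * (Editorial sketch of the userspace side; error handling omitted:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *
 * poll() returns once the whole thread group has exited.)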
1927b53b0b9dSJoel Fernandes (Google)  */
19289e77716aSLuc Van Oostenryck static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
1929b53b0b9dSJoel Fernandes (Google) {
1930b53b0b9dSJoel Fernandes (Google) 	struct pid *pid = file->private_data;
19319e77716aSLuc Van Oostenryck 	__poll_t poll_flags = 0;
1932b53b0b9dSJoel Fernandes (Google) 
1933b53b0b9dSJoel Fernandes (Google) 	poll_wait(file, &pid->wait_pidfd, pts);
1934b53b0b9dSJoel Fernandes (Google) 
1935b53b0b9dSJoel Fernandes (Google) 	/*
1936b53b0b9dSJoel Fernandes (Google) 	 * Inform pollers only when the whole thread group exits.
1937b53b0b9dSJoel Fernandes (Google) 	 * If the thread group leader exits before all other threads in the
1938b53b0b9dSJoel Fernandes (Google) 	 * group, then poll(2) should block, similar to the wait(2) family.
1939b53b0b9dSJoel Fernandes (Google) 	 */
194038fd525aSEric W. Biederman 	if (thread_group_exited(pid))
19419e77716aSLuc Van Oostenryck 		poll_flags = EPOLLIN | EPOLLRDNORM;
1942b53b0b9dSJoel Fernandes (Google) 
1943b53b0b9dSJoel Fernandes (Google) 	return poll_flags;
1944b53b0b9dSJoel Fernandes (Google) }
1945b53b0b9dSJoel Fernandes (Google) 
1946b3e58382SChristian Brauner const struct file_operations pidfd_fops = {
1947b3e58382SChristian Brauner 	.release = pidfd_release,
1948b53b0b9dSJoel Fernandes (Google) 	.poll = pidfd_poll,
1949b3e58382SChristian Brauner #ifdef CONFIG_PROC_FS
1950b3e58382SChristian Brauner 	.show_fdinfo = pidfd_show_fdinfo,
1951b3e58382SChristian Brauner #endif
1952b3e58382SChristian Brauner };
1953b3e58382SChristian Brauner 
19546ae930d9SChristian Brauner /**
19556ae930d9SChristian Brauner  * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
19566ae930d9SChristian Brauner  * @pid: the struct pid for which to create a pidfd
19576ae930d9SChristian Brauner  * @flags: flags of the new @pidfd
19586ae930d9SChristian Brauner  * @ret: the new pidfd file to return
19596ae930d9SChristian Brauner  *
19606ae930d9SChristian Brauner  * Allocate a new file that stashes @pid and reserve a new pidfd number in the
19616ae930d9SChristian Brauner  * caller's file descriptor table. The pidfd is reserved but not installed yet.
19626ae930d9SChristian Brauner  *
19636ae930d9SChristian Brauner  * The helper doesn't perform checks on @pid which makes it useful for pidfds
19646ae930d9SChristian Brauner  * created via CLONE_PIDFD where @pid has no task attached when the pidfd and
19656ae930d9SChristian Brauner  * pidfd file are prepared.
19666ae930d9SChristian Brauner  *
19676ae930d9SChristian Brauner  * If this function returns successfully the caller is responsible for either
19686ae930d9SChristian Brauner  * calling fd_install() passing the returned pidfd and pidfd file as arguments
19696ae930d9SChristian Brauner  * in order to install the pidfd into its file descriptor table, or for calling
19706ae930d9SChristian Brauner  * put_unused_fd() and fput() on the returned pidfd and pidfd file
19716ae930d9SChristian Brauner  * respectively.
19726ae930d9SChristian Brauner  *
19736ae930d9SChristian Brauner  * This function is useful when a pidfd must already be reserved but there
19746ae930d9SChristian Brauner  * might still be points of failure afterwards and the caller wants to ensure
19756ae930d9SChristian Brauner  * that no pidfd is leaked into its file descriptor table.
19766ae930d9SChristian Brauner  *
19776ae930d9SChristian Brauner  * Return: On success, a reserved pidfd is returned from the function and a new
19786ae930d9SChristian Brauner  *         pidfd file is returned in the last argument to the function. On
19796ae930d9SChristian Brauner  *         error, a negative error code is returned from the function and the
19806ae930d9SChristian Brauner  *         last argument remains unchanged.
19816ae930d9SChristian Brauner  */
19826ae930d9SChristian Brauner static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
19836ae930d9SChristian Brauner {
19846ae930d9SChristian Brauner 	int pidfd;
19856ae930d9SChristian Brauner 	struct file *pidfd_file;
19866ae930d9SChristian Brauner 
19876ae930d9SChristian Brauner 	if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC))
19886ae930d9SChristian Brauner 		return -EINVAL;
19896ae930d9SChristian Brauner 
19906ae930d9SChristian Brauner 	pidfd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
19916ae930d9SChristian Brauner 	if (pidfd < 0)
19926ae930d9SChristian Brauner 		return pidfd;
19936ae930d9SChristian Brauner 
19946ae930d9SChristian Brauner 	pidfd_file = anon_inode_getfile("[pidfd]", &pidfd_fops, pid,
19956ae930d9SChristian Brauner 					flags | O_RDWR | O_CLOEXEC);
19966ae930d9SChristian Brauner 	if (IS_ERR(pidfd_file)) {
19976ae930d9SChristian Brauner 		put_unused_fd(pidfd);
19986ae930d9SChristian Brauner 		return PTR_ERR(pidfd_file);
19996ae930d9SChristian Brauner 	}
20006ae930d9SChristian Brauner 	get_pid(pid); /* held by pidfd_file now */
20016ae930d9SChristian Brauner 	*ret = pidfd_file;
20026ae930d9SChristian Brauner 	return pidfd;
20036ae930d9SChristian Brauner }
20046ae930d9SChristian Brauner 
20056ae930d9SChristian Brauner /**
20066ae930d9SChristian Brauner  * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
20076ae930d9SChristian Brauner  * @pid: the struct pid for which to create a pidfd
20086ae930d9SChristian Brauner  * @flags: flags of the new @pidfd
20096ae930d9SChristian Brauner  * @ret: the new pidfd file to return
20106ae930d9SChristian Brauner  *
20116ae930d9SChristian Brauner  * Allocate a new file that stashes @pid and reserve a new pidfd number in the
20126ae930d9SChristian Brauner  * caller's file descriptor table. The pidfd is reserved but not installed yet.
20136ae930d9SChristian Brauner  *
20146ae930d9SChristian Brauner  * The helper verifies that @pid is used as a thread group leader.
20156ae930d9SChristian Brauner  *
20166ae930d9SChristian Brauner  * If this function returns successfully the caller is responsible for either
20176ae930d9SChristian Brauner  * calling fd_install() passing the returned pidfd and pidfd file as arguments
20186ae930d9SChristian Brauner  * in order to install the pidfd into its file descriptor table, or for calling
20196ae930d9SChristian Brauner  * put_unused_fd() and fput() on the returned pidfd and pidfd file
20206ae930d9SChristian Brauner  * respectively.
20216ae930d9SChristian Brauner  *
20226ae930d9SChristian Brauner  * This function is useful when a pidfd must already be reserved but there
20236ae930d9SChristian Brauner  * might still be points of failure afterwards and the caller wants to ensure
20246ae930d9SChristian Brauner  * that no pidfd is leaked into its file descriptor table.
20256ae930d9SChristian Brauner  *
20266ae930d9SChristian Brauner  * Return: On success, a reserved pidfd is returned from the function and a new
20276ae930d9SChristian Brauner  *         pidfd file is returned in the last argument to the function. On
20286ae930d9SChristian Brauner  *         error, a negative error code is returned from the function and the
20296ae930d9SChristian Brauner  *         last argument remains unchanged.
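 *
 * (Editorial sketch of the pattern described above, error handling
 * abbreviated:
 *
 *	pidfd = pidfd_prepare(pid, O_CLOEXEC, &pidfd_file);
 *	if (pidfd < 0)
 *		return pidfd;
 *	...
 *	fd_install(pidfd, pidfd_file);
 *
 * On an intermediate failure, put_unused_fd(pidfd) and fput(pidfd_file)
 * undo the reservation instead.)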
20306ae930d9SChristian Brauner */ 20316ae930d9SChristian Brauner int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) 20326ae930d9SChristian Brauner { 20336ae930d9SChristian Brauner if (!pid || !pid_has_task(pid, PIDTYPE_TGID)) 20346ae930d9SChristian Brauner return -EINVAL; 20356ae930d9SChristian Brauner 20366ae930d9SChristian Brauner return __pidfd_prepare(pid, flags, ret); 20376ae930d9SChristian Brauner } 20386ae930d9SChristian Brauner 2039c3f3ce04SAndrea Arcangeli static void __delayed_free_task(struct rcu_head *rhp) 2040c3f3ce04SAndrea Arcangeli { 2041c3f3ce04SAndrea Arcangeli struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 2042c3f3ce04SAndrea Arcangeli 2043c3f3ce04SAndrea Arcangeli free_task(tsk); 2044c3f3ce04SAndrea Arcangeli } 2045c3f3ce04SAndrea Arcangeli 2046c3f3ce04SAndrea Arcangeli static __always_inline void delayed_free_task(struct task_struct *tsk) 2047c3f3ce04SAndrea Arcangeli { 2048c3f3ce04SAndrea Arcangeli if (IS_ENABLED(CONFIG_MEMCG)) 2049c3f3ce04SAndrea Arcangeli call_rcu(&tsk->rcu, __delayed_free_task); 2050c3f3ce04SAndrea Arcangeli else 2051c3f3ce04SAndrea Arcangeli free_task(tsk); 2052c3f3ce04SAndrea Arcangeli } 2053c3f3ce04SAndrea Arcangeli 205467197a4fSSuren Baghdasaryan static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) 205567197a4fSSuren Baghdasaryan { 205667197a4fSSuren Baghdasaryan /* Skip if kernel thread */ 205767197a4fSSuren Baghdasaryan if (!tsk->mm) 205867197a4fSSuren Baghdasaryan return; 205967197a4fSSuren Baghdasaryan 206067197a4fSSuren Baghdasaryan /* Skip if spawning a thread or using vfork */ 206167197a4fSSuren Baghdasaryan if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM) 206267197a4fSSuren Baghdasaryan return; 206367197a4fSSuren Baghdasaryan 206467197a4fSSuren Baghdasaryan /* We need to synchronize with __set_oom_adj */ 206567197a4fSSuren Baghdasaryan mutex_lock(&oom_adj_mutex); 206667197a4fSSuren Baghdasaryan set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); 206767197a4fSSuren Baghdasaryan /* Update the values in case they were changed after copy_signal */ 206867197a4fSSuren Baghdasaryan tsk->signal->oom_score_adj = current->signal->oom_score_adj; 206967197a4fSSuren Baghdasaryan tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min; 207067197a4fSSuren Baghdasaryan mutex_unlock(&oom_adj_mutex); 207167197a4fSSuren Baghdasaryan } 207267197a4fSSuren Baghdasaryan 207379257534SDaniel Bristot de Oliveira #ifdef CONFIG_RV 207479257534SDaniel Bristot de Oliveira static void rv_task_fork(struct task_struct *p) 207579257534SDaniel Bristot de Oliveira { 207679257534SDaniel Bristot de Oliveira int i; 207779257534SDaniel Bristot de Oliveira 207879257534SDaniel Bristot de Oliveira for (i = 0; i < RV_PER_TASK_MONITORS; i++) 207979257534SDaniel Bristot de Oliveira p->rv[i].da_mon.monitoring = false; 208079257534SDaniel Bristot de Oliveira } 208179257534SDaniel Bristot de Oliveira #else 208279257534SDaniel Bristot de Oliveira #define rv_task_fork(p) do {} while (0) 208379257534SDaniel Bristot de Oliveira #endif 208479257534SDaniel Bristot de Oliveira 2085f06febc9SFrank Mayhar /* 20861da177e4SLinus Torvalds * This creates a new process as a copy of the old one, 20871da177e4SLinus Torvalds * but does not actually start it yet. 20881da177e4SLinus Torvalds * 20891da177e4SLinus Torvalds * It copies the registers, and all the appropriate 20901da177e4SLinus Torvalds * parts of the process environment (as per the clone 20911da177e4SLinus Torvalds * flags). 
The actual kick-off is left to the caller. 20921da177e4SLinus Torvalds */ 20930766f788SEmese Revfy static __latent_entropy struct task_struct *copy_process( 209409a05394SRoland McGrath struct pid *pid, 20953033f14aSJosh Triplett int trace, 20967f192e3cSChristian Brauner int node, 20977f192e3cSChristian Brauner struct kernel_clone_args *args) 20981da177e4SLinus Torvalds { 2099b3e58382SChristian Brauner int pidfd = -1, retval; 2100a24efe62SMariusz Kozlowski struct task_struct *p; 2101c3ad2c3bSEric W. Biederman struct multiprocess_signals delayed; 21026fd2fe49SAl Viro struct file *pidfile = NULL; 2103c5febea0SEric W. Biederman const u64 clone_flags = args->flags; 2104769071acSAndrei Vagin struct nsproxy *nsp = current->nsproxy; 21051da177e4SLinus Torvalds 2106667b6094SMarcos Paulo de Souza /* 2107667b6094SMarcos Paulo de Souza * Don't allow sharing the root directory with processes in a different 2108667b6094SMarcos Paulo de Souza * namespace 2109667b6094SMarcos Paulo de Souza */ 21101da177e4SLinus Torvalds if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) 21111da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 21121da177e4SLinus Torvalds 2113e66eded8SEric W. Biederman if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) 2114e66eded8SEric W. Biederman return ERR_PTR(-EINVAL); 2115e66eded8SEric W. Biederman 21161da177e4SLinus Torvalds /* 21171da177e4SLinus Torvalds * Thread groups must share signals as well, and detached threads 21181da177e4SLinus Torvalds * can only be started up within the thread group. 21191da177e4SLinus Torvalds */ 21201da177e4SLinus Torvalds if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) 21211da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 21221da177e4SLinus Torvalds 21231da177e4SLinus Torvalds /* 21241da177e4SLinus Torvalds * Shared signal handlers imply shared VM. By way of the above, 21251da177e4SLinus Torvalds * thread groups also imply shared VM. Blocking this case allows 21261da177e4SLinus Torvalds * for various simplifications in other code. 21271da177e4SLinus Torvalds */ 21281da177e4SLinus Torvalds if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) 21291da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 21301da177e4SLinus Torvalds 2131123be07bSSukadev Bhattiprolu /* 2132123be07bSSukadev Bhattiprolu * Siblings of global init remain as zombies on exit since they are 2133123be07bSSukadev Bhattiprolu * not reaped by their parent (swapper). To solve this and to avoid 2134123be07bSSukadev Bhattiprolu * multi-rooted process trees, prevent global and container-inits 2135123be07bSSukadev Bhattiprolu * from creating siblings. 2136123be07bSSukadev Bhattiprolu */ 2137123be07bSSukadev Bhattiprolu if ((clone_flags & CLONE_PARENT) && 2138123be07bSSukadev Bhattiprolu current->signal->flags & SIGNAL_UNKILLABLE) 2139123be07bSSukadev Bhattiprolu return ERR_PTR(-EINVAL); 2140123be07bSSukadev Bhattiprolu 21418382fcacSEric W. Biederman /* 214240a0d32dSOleg Nesterov * If the new process will be in a different pid or user namespace 2143faf00da5SEric W. Biederman * do not allow it to share a thread group with the forking task. 21448382fcacSEric W. Biederman */ 2145faf00da5SEric W. 
Biederman 	if (clone_flags & CLONE_THREAD) {
214640a0d32dSOleg Nesterov 		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
2147769071acSAndrei Vagin 		    (task_active_pid_ns(current) != nsp->pid_ns_for_children))
2148769071acSAndrei Vagin 			return ERR_PTR(-EINVAL);
2149769071acSAndrei Vagin 	}
2150769071acSAndrei Vagin 
2151b3e58382SChristian Brauner 	if (clone_flags & CLONE_PIDFD) {
2152b3e58382SChristian Brauner 		/*
2153b3e58382SChristian Brauner 		 * - CLONE_DETACHED is blocked so that we can potentially
2154b3e58382SChristian Brauner 		 *   reuse it later for CLONE_PIDFD.
2155b3e58382SChristian Brauner 		 * - CLONE_THREAD is blocked until someone really needs it.
2156b3e58382SChristian Brauner 		 */
21577f192e3cSChristian Brauner 		if (clone_flags & (CLONE_DETACHED | CLONE_THREAD))
2158b3e58382SChristian Brauner 			return ERR_PTR(-EINVAL);
2159b3e58382SChristian Brauner 	}
2160b3e58382SChristian Brauner 
2161c3ad2c3bSEric W. Biederman 	/*
2162c3ad2c3bSEric W. Biederman 	 * Force any signals received before this point to be delivered
2163c3ad2c3bSEric W. Biederman 	 * before the fork happens.  Collect up signals sent to multiple
2164c3ad2c3bSEric W. Biederman 	 * processes that happen during the fork and delay them so that
2165c3ad2c3bSEric W. Biederman 	 * they appear to happen after the fork.
2166c3ad2c3bSEric W. Biederman 	 */
2167c3ad2c3bSEric W. Biederman 	sigemptyset(&delayed.signal);
2168c3ad2c3bSEric W. Biederman 	INIT_HLIST_NODE(&delayed.node);
2169c3ad2c3bSEric W. Biederman 
2170c3ad2c3bSEric W. Biederman 	spin_lock_irq(&current->sighand->siglock);
2171c3ad2c3bSEric W. Biederman 	if (!(clone_flags & CLONE_THREAD))
2172c3ad2c3bSEric W. Biederman 		hlist_add_head(&delayed.node, &current->signal->multiprocess);
2173c3ad2c3bSEric W. Biederman 	recalc_sigpending();
2174c3ad2c3bSEric W. Biederman 	spin_unlock_irq(&current->sighand->siglock);
2175c3ad2c3bSEric W. Biederman 	retval = -ERESTARTNOINTR;
217666ae0d1eSJens Axboe 	if (task_sigpending(current))
2177c3ad2c3bSEric W. Biederman 		goto fork_out;
2178c3ad2c3bSEric W. Biederman 
21791da177e4SLinus Torvalds 	retval = -ENOMEM;
2180725fc629SAndi Kleen 	p = dup_task_struct(current, node);
21811da177e4SLinus Torvalds 	if (!p)
21821da177e4SLinus Torvalds 		goto fork_out;
2183753550ebSEric W. Biederman 	p->flags &= ~PF_KTHREAD;
2184753550ebSEric W. Biederman 	if (args->kthread)
2185753550ebSEric W. Biederman 		p->flags |= PF_KTHREAD;
2186b16b3855SJens Axboe 	if (args->io_thread) {
2187b16b3855SJens Axboe 		/*
2188b16b3855SJens Axboe 		 * Mark us an IO worker, and block any signal that isn't
2189b16b3855SJens Axboe 		 * fatal or STOP
2190b16b3855SJens Axboe 		 */
2191cc440e87SJens Axboe 		p->flags |= PF_IO_WORKER;
2192b16b3855SJens Axboe 		siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
2193b16b3855SJens Axboe 	}
21941da177e4SLinus Torvalds 
21957f192e3cSChristian Brauner 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
21964d6501dcSVegard Nossum 	/*
21974d6501dcSVegard Nossum 	 * Clear TID on mm_release()?
21984d6501dcSVegard Nossum 	 */
21997f192e3cSChristian Brauner 	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL;
22004d6501dcSVegard Nossum 
2201f7e8b616SSteven Rostedt 	ftrace_graph_init_task(p);
2202f7e8b616SSteven Rostedt 
2203bea493a0SPeter Zijlstra 	rt_mutex_init_task(p);
2204bea493a0SPeter Zijlstra 
2205a21ee605SPeter Zijlstra 	lockdep_assert_irqs_enabled();
2206d12c1a37SIngo Molnar #ifdef CONFIG_PROVE_LOCKING
2207de30a2b3SIngo Molnar 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
2208de30a2b3SIngo Molnar #endif
22098f2f9c4dSEric W.
Biederman retval = copy_creds(p, clone_flags); 22108f2f9c4dSEric W. Biederman if (retval < 0) 22118f2f9c4dSEric W. Biederman goto bad_fork_free; 22128f2f9c4dSEric W. Biederman 22131da177e4SLinus Torvalds retval = -EAGAIN; 2214de399236SAlexey Gladkov if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) { 2215b57922b6SEric Paris if (p->real_cred->user != INIT_USER && 2216b57922b6SEric Paris !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) 22178f2f9c4dSEric W. Biederman goto bad_fork_cleanup_count; 22181da177e4SLinus Torvalds } 221972fa5997SVasiliy Kulikov current->flags &= ~PF_NPROC_EXCEEDED; 22201da177e4SLinus Torvalds 22211da177e4SLinus Torvalds /* 22221da177e4SLinus Torvalds * If multiple threads are within copy_process(), then this check 22231da177e4SLinus Torvalds * triggers too late. This doesn't hurt, the check is only there 22241da177e4SLinus Torvalds * to stop root fork bombs. 22251da177e4SLinus Torvalds */ 222604ec93feSLi Zefan retval = -EAGAIN; 2227c17d1a3aSWeilong Chen if (data_race(nr_threads >= max_threads)) 22281da177e4SLinus Torvalds goto bad_fork_cleanup_count; 22291da177e4SLinus Torvalds 2230ca74e92bSShailabh Nagar delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 2231a8ea6fc9SFrederic Weisbecker p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY); 2232514ddb44SDavid Rientjes p->flags |= PF_FORKNOEXEC; 22331da177e4SLinus Torvalds INIT_LIST_HEAD(&p->children); 22341da177e4SLinus Torvalds INIT_LIST_HEAD(&p->sibling); 2235f41d911fSPaul E. McKenney rcu_copy_process(p); 22361da177e4SLinus Torvalds p->vfork_done = NULL; 22371da177e4SLinus Torvalds spin_lock_init(&p->alloc_lock); 22381da177e4SLinus Torvalds 22391da177e4SLinus Torvalds init_sigpending(&p->pending); 22401da177e4SLinus Torvalds 224164861634SMartin Schwidefsky p->utime = p->stime = p->gtime = 0; 224240565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 224364861634SMartin Schwidefsky p->utimescaled = p->stimescaled = 0; 224440565b5aSStanislaw Gruszka #endif 22459d7fb042SPeter Zijlstra prev_cputime_init(&p->prev_cputime); 22469d7fb042SPeter Zijlstra 22476a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 2248bac5b6b6SFrederic Weisbecker seqcount_init(&p->vtime.seqcount); 2249bac5b6b6SFrederic Weisbecker p->vtime.starttime = 0; 2250bac5b6b6SFrederic Weisbecker p->vtime.state = VTIME_INACTIVE; 22516a61671bSFrederic Weisbecker #endif 22526a61671bSFrederic Weisbecker 22530f212204SJens Axboe #ifdef CONFIG_IO_URING 22540f212204SJens Axboe p->io_uring = NULL; 22550f212204SJens Axboe #endif 22560f212204SJens Axboe 2257a3a2e76cSKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING) 2258a3a2e76cSKAMEZAWA Hiroyuki memset(&p->rss_stat, 0, sizeof(p->rss_stat)); 2259a3a2e76cSKAMEZAWA Hiroyuki #endif 2260172ba844SBalbir Singh 22616976675dSArjan van de Ven p->default_timer_slack_ns = current->timer_slack_ns; 22626976675dSArjan van de Ven 2263eb414681SJohannes Weiner #ifdef CONFIG_PSI 2264eb414681SJohannes Weiner p->psi_flags = 0; 2265eb414681SJohannes Weiner #endif 2266eb414681SJohannes Weiner 22675995477aSAndrea Righi task_io_accounting_init(&p->ioac); 22681da177e4SLinus Torvalds acct_clear_integrals(p); 22691da177e4SLinus Torvalds 22703a245c0fSThomas Gleixner posix_cputimers_init(&p->posix_cputimers); 22711da177e4SLinus Torvalds 22721da177e4SLinus Torvalds p->io_context = NULL; 2273c0b0ae8aSRichard Guy Briggs audit_set_context(p, NULL); 2274b4f48b63SPaul Menage cgroup_fork(p); 2275343f4c49SEric W. 
Biederman if (args->kthread) { 227640966e31SEric W. Biederman if (!set_kthread_struct(p)) 2277ff8288ffSEric W. Biederman goto bad_fork_cleanup_delayacct; 227840966e31SEric W. Biederman } 22791da177e4SLinus Torvalds #ifdef CONFIG_NUMA 2280846a16bfSLee Schermerhorn p->mempolicy = mpol_dup(p->mempolicy); 22811da177e4SLinus Torvalds if (IS_ERR(p->mempolicy)) { 22821da177e4SLinus Torvalds retval = PTR_ERR(p->mempolicy); 22831da177e4SLinus Torvalds p->mempolicy = NULL; 2284ff8288ffSEric W. Biederman goto bad_fork_cleanup_delayacct; 22851da177e4SLinus Torvalds } 22861da177e4SLinus Torvalds #endif 2287778d3b0fSMichal Hocko #ifdef CONFIG_CPUSETS 2288778d3b0fSMichal Hocko p->cpuset_mem_spread_rotor = NUMA_NO_NODE; 2289778d3b0fSMichal Hocko p->cpuset_slab_spread_rotor = NUMA_NO_NODE; 2290b7505861SAhmed S. Darwish seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock); 2291778d3b0fSMichal Hocko #endif 2292de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS 22930584df9cSMarco Elver memset(&p->irqtrace, 0, sizeof(p->irqtrace)); 22940584df9cSMarco Elver p->irqtrace.hardirq_disable_ip = _THIS_IP_; 22950584df9cSMarco Elver p->irqtrace.softirq_enable_ip = _THIS_IP_; 2296de30a2b3SIngo Molnar p->softirqs_enabled = 1; 2297de30a2b3SIngo Molnar p->softirq_context = 0; 2298de30a2b3SIngo Molnar #endif 22998bcbde54SDavid Hildenbrand 23008bcbde54SDavid Hildenbrand p->pagefault_disabled = 0; 23018bcbde54SDavid Hildenbrand 2302fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP 2303b09be676SByungchul Park lockdep_init_task(p); 2304fbb9ce95SIngo Molnar #endif 23051da177e4SLinus Torvalds 2306408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES 2307408894eeSIngo Molnar p->blocked_on = NULL; /* not blocked yet */ 2308408894eeSIngo Molnar #endif 2309cafe5635SKent Overstreet #ifdef CONFIG_BCACHE 2310cafe5635SKent Overstreet p->sequential_io = 0; 2311cafe5635SKent Overstreet p->sequential_io_avg = 0; 2312cafe5635SKent Overstreet #endif 2313a10787e6SSong Liu #ifdef CONFIG_BPF_SYSCALL 2314a10787e6SSong Liu RCU_INIT_POINTER(p->bpf_storage, NULL); 2315c7603cfaSAndrii Nakryiko p->bpf_ctx = NULL; 2316a10787e6SSong Liu #endif 23170f481406SMarkus Metzger 23183c90e6e9SSrivatsa Vaddagiri /* Perform scheduler related setup. Assign this task to a CPU. 
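 * From here on, each initialization step that can fail is paired with
 * a matching bad_fork_* unwind label at the bottom of this function; a
 * failure at any step jumps to the label that releases everything the
 * earlier steps set up, in reverse order.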
*/ 2319aab03e05SDario Faggioli retval = sched_fork(clone_flags, p); 2320aab03e05SDario Faggioli if (retval) 2321aab03e05SDario Faggioli goto bad_fork_cleanup_policy; 23226ab423e0SPeter Zijlstra 23232b26f0aaSMarco Elver retval = perf_event_init_task(p, clone_flags); 23246ab423e0SPeter Zijlstra if (retval) 23256ab423e0SPeter Zijlstra goto bad_fork_cleanup_policy; 2326fb0a685cSDaniel Rebelo de Oliveira retval = audit_alloc(p); 2327fb0a685cSDaniel Rebelo de Oliveira if (retval) 23286c72e350SPeter Zijlstra goto bad_fork_cleanup_perf; 23291da177e4SLinus Torvalds /* copy all the process information */ 2330ab602f79SJack Miller shm_init_task(p); 2331e4e55b47STetsuo Handa retval = security_task_alloc(p, clone_flags); 2332fb0a685cSDaniel Rebelo de Oliveira if (retval) 23331da177e4SLinus Torvalds goto bad_fork_cleanup_audit; 2334e4e55b47STetsuo Handa retval = copy_semundo(clone_flags, p); 2335e4e55b47STetsuo Handa if (retval) 2336e4e55b47STetsuo Handa goto bad_fork_cleanup_security; 2337fb0a685cSDaniel Rebelo de Oliveira retval = copy_files(clone_flags, p); 2338fb0a685cSDaniel Rebelo de Oliveira if (retval) 23391da177e4SLinus Torvalds goto bad_fork_cleanup_semundo; 2340fb0a685cSDaniel Rebelo de Oliveira retval = copy_fs(clone_flags, p); 2341fb0a685cSDaniel Rebelo de Oliveira if (retval) 23421da177e4SLinus Torvalds goto bad_fork_cleanup_files; 2343fb0a685cSDaniel Rebelo de Oliveira retval = copy_sighand(clone_flags, p); 2344fb0a685cSDaniel Rebelo de Oliveira if (retval) 23451da177e4SLinus Torvalds goto bad_fork_cleanup_fs; 2346fb0a685cSDaniel Rebelo de Oliveira retval = copy_signal(clone_flags, p); 2347fb0a685cSDaniel Rebelo de Oliveira if (retval) 23481da177e4SLinus Torvalds goto bad_fork_cleanup_sighand; 2349fb0a685cSDaniel Rebelo de Oliveira retval = copy_mm(clone_flags, p); 2350fb0a685cSDaniel Rebelo de Oliveira if (retval) 23511da177e4SLinus Torvalds goto bad_fork_cleanup_signal; 2352fb0a685cSDaniel Rebelo de Oliveira retval = copy_namespaces(clone_flags, p); 2353fb0a685cSDaniel Rebelo de Oliveira if (retval) 2354d84f4f99SDavid Howells goto bad_fork_cleanup_mm; 2355fb0a685cSDaniel Rebelo de Oliveira retval = copy_io(clone_flags, p); 2356fb0a685cSDaniel Rebelo de Oliveira if (retval) 2357fd0928dfSJens Axboe goto bad_fork_cleanup_namespaces; 2358c5febea0SEric W. Biederman retval = copy_thread(p, args); 23591da177e4SLinus Torvalds if (retval) 2360fd0928dfSJens Axboe goto bad_fork_cleanup_io; 23611da177e4SLinus Torvalds 2362afaef01cSAlexander Popov stackleak_task_init(p); 2363afaef01cSAlexander Popov 2364425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) { 236549cb2fc4SAdrian Reber pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid, 236649cb2fc4SAdrian Reber args->set_tid_size); 236735f71bc0SMichal Hocko if (IS_ERR(pid)) { 236835f71bc0SMichal Hocko retval = PTR_ERR(pid); 23690740aa5fSJiri Slaby goto bad_fork_cleanup_thread; 2370425fb2b4SPavel Emelyanov } 237135f71bc0SMichal Hocko } 2372425fb2b4SPavel Emelyanov 2373b3e58382SChristian Brauner /* 2374b3e58382SChristian Brauner * This has to happen after we've potentially unshared the file 2375b3e58382SChristian Brauner * descriptor table (so that the pidfd doesn't leak into the child 2376b3e58382SChristian Brauner * if the fd table isn't shared). 2377b3e58382SChristian Brauner */ 2378b3e58382SChristian Brauner if (clone_flags & CLONE_PIDFD) { 2379*ca7707f5SChristian Brauner /* Note that no task has been attached to @pid yet. 
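 * __pidfd_prepare() only reserves the descriptor and creates the pidfd
 * file; the pair is committed with fd_install() after the last failure
 * path below, so an error can still unwind via put_unused_fd()/fput()
 * without the descriptor ever becoming visible to userspace.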
*/ 2380*ca7707f5SChristian Brauner retval = __pidfd_prepare(pid, O_RDWR | O_CLOEXEC, &pidfile); 2381b3e58382SChristian Brauner if (retval < 0) 2382b3e58382SChristian Brauner goto bad_fork_free_pid; 2383b3e58382SChristian Brauner pidfd = retval; 23846fd2fe49SAl Viro 23857f192e3cSChristian Brauner retval = put_user(pidfd, args->pidfd); 2386b3e58382SChristian Brauner if (retval) 2387b3e58382SChristian Brauner goto bad_fork_put_pidfd; 2388b3e58382SChristian Brauner } 2389b3e58382SChristian Brauner 239073c10101SJens Axboe #ifdef CONFIG_BLOCK 239173c10101SJens Axboe p->plug = NULL; 239273c10101SJens Axboe #endif 2393ba31c1a4SThomas Gleixner futex_init_task(p); 2394ba31c1a4SThomas Gleixner 23951da177e4SLinus Torvalds /* 2396f9a3879aSGOTO Masanori * sigaltstack should be cleared when sharing the same VM 2397f9a3879aSGOTO Masanori */ 2398f9a3879aSGOTO Masanori if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) 23992a742138SStas Sergeev sas_ss_reset(p); 2400f9a3879aSGOTO Masanori 2401f9a3879aSGOTO Masanori /* 24026580807dSOleg Nesterov * Syscall tracing and stepping should be turned off in the 24036580807dSOleg Nesterov * child regardless of CLONE_PTRACE. 24041da177e4SLinus Torvalds */ 24056580807dSOleg Nesterov user_disable_single_step(p); 240664c19ba2SGabriel Krisman Bertazi clear_task_syscall_work(p, SYSCALL_TRACE); 240764eb35f7SGabriel Krisman Bertazi #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU) 240864eb35f7SGabriel Krisman Bertazi clear_task_syscall_work(p, SYSCALL_EMU); 2409ed75e8d5SLaurent Vivier #endif 2410e02c9b0dSLin Feng clear_tsk_latency_tracing(p); 24111da177e4SLinus Torvalds 24121da177e4SLinus Torvalds /* ok, now we should be set up.. */ 241318c830dfSOleg Nesterov p->pid = pid_nr(pid); 241418c830dfSOleg Nesterov if (clone_flags & CLONE_THREAD) { 241518c830dfSOleg Nesterov p->group_leader = current->group_leader; 241618c830dfSOleg Nesterov p->tgid = current->tgid; 241718c830dfSOleg Nesterov } else { 241818c830dfSOleg Nesterov p->group_leader = p; 241918c830dfSOleg Nesterov p->tgid = p->pid; 242018c830dfSOleg Nesterov } 24215f8aadd8SOleg Nesterov 24229d823e8fSWu Fengguang p->nr_dirtied = 0; 24239d823e8fSWu Fengguang p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); 242483712358SWu Fengguang p->dirty_paused_when = 0; 24259d823e8fSWu Fengguang 2426bb8cbbfeSOleg Nesterov p->pdeath_signal = 0; 242747e65328SOleg Nesterov INIT_LIST_HEAD(&p->thread_group); 2428158e1645SAl Viro p->task_works = NULL; 2429ca7752caSMichael Pratt clear_posix_cputimers_work(p); 24301da177e4SLinus Torvalds 2431d741bf41SPeter Zijlstra #ifdef CONFIG_KRETPROBES 2432d741bf41SPeter Zijlstra p->kretprobe_instances.first = NULL; 2433d741bf41SPeter Zijlstra #endif 243454ecbe6fSMasami Hiramatsu #ifdef CONFIG_RETHOOK 243554ecbe6fSMasami Hiramatsu p->rethooks.first = NULL; 243654ecbe6fSMasami Hiramatsu #endif 2437d741bf41SPeter Zijlstra 243818c830dfSOleg Nesterov /* 24397e47682eSAleksa Sarai * Ensure that the cgroup subsystem policies allow the new process to be 24407b7b8a2cSRandy Dunlap * forked. It should be noted that the new process's css_set can be changed 24417e47682eSAleksa Sarai * between here and cgroup_post_fork() if an organisation operation is in 24427e47682eSAleksa Sarai * progress. 
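 *
 * The hooks follow a prepare/commit pattern; as a rough sketch (not
 * the exact API contract):
 *
 *	cgroup_can_fork(p, args);	// may fail, child not yet visible
 *	...				// remaining failure paths
 *	cgroup_post_fork(p, args);	// commit, must not fail
 *
 * with cgroup_cancel_fork(p, args) undoing the preparation on any
 * error in between.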
24437e47682eSAleksa Sarai */ 2444ef2c41cfSChristian Brauner retval = cgroup_can_fork(p, args); 24457e47682eSAleksa Sarai if (retval) 24465a5cf5cbSChristian Brauner goto bad_fork_put_pidfd; 24477e47682eSAleksa Sarai 24487e47682eSAleksa Sarai /* 2449b1e82065SPeter Zijlstra * Now that the cgroups are pinned, re-clone the parent cgroup and put 2450b1e82065SPeter Zijlstra * the new task on the correct runqueue. All this *before* the task 2451b1e82065SPeter Zijlstra * becomes visible. 2452b1e82065SPeter Zijlstra * 2453b1e82065SPeter Zijlstra * This isn't part of ->can_fork() because while the re-cloning is 2454b1e82065SPeter Zijlstra * cgroup specific, it unconditionally needs to place the task on a 2455b1e82065SPeter Zijlstra * runqueue. 2456b1e82065SPeter Zijlstra */ 2457b1e82065SPeter Zijlstra sched_cgroup_fork(p, args); 2458b1e82065SPeter Zijlstra 2459b1e82065SPeter Zijlstra /* 24607b558513SDavid Herrmann * From this point on we must avoid any synchronous user-space 24617b558513SDavid Herrmann * communication until we take the tasklist-lock. In particular, we do 24627b558513SDavid Herrmann * not want user-space to be able to predict the process start-time by 24637b558513SDavid Herrmann * stalling fork(2) after we recorded the start_time but before it is 24647b558513SDavid Herrmann * visible to the system. 24657b558513SDavid Herrmann */ 24667b558513SDavid Herrmann 24677b558513SDavid Herrmann p->start_time = ktime_get_ns(); 2468cf25e24dSPeter Zijlstra p->start_boottime = ktime_get_boottime_ns(); 24697b558513SDavid Herrmann 24707b558513SDavid Herrmann /* 247118c830dfSOleg Nesterov * Make it visible to the rest of the system, but don't wake it up yet. 247218c830dfSOleg Nesterov * Need tasklist lock for parent etc handling! 247318c830dfSOleg Nesterov */ 24741da177e4SLinus Torvalds write_lock_irq(&tasklist_lock); 24751da177e4SLinus Torvalds 24761da177e4SLinus Torvalds /* CLONE_PARENT re-uses the old parent */ 24772d5516cbSOleg Nesterov if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { 24781da177e4SLinus Torvalds p->real_parent = current->real_parent; 24792d5516cbSOleg Nesterov p->parent_exec_id = current->parent_exec_id; 2480b4e00444SEddy Wu if (clone_flags & CLONE_THREAD) 2481b4e00444SEddy Wu p->exit_signal = -1; 2482b4e00444SEddy Wu else 2483b4e00444SEddy Wu p->exit_signal = current->group_leader->exit_signal; 24842d5516cbSOleg Nesterov } else { 24851da177e4SLinus Torvalds p->real_parent = current; 24862d5516cbSOleg Nesterov p->parent_exec_id = current->self_exec_id; 2487b4e00444SEddy Wu p->exit_signal = args->exit_signal; 24882d5516cbSOleg Nesterov } 24891da177e4SLinus Torvalds 2490d83a7cb3SJosh Poimboeuf klp_copy_process(p); 2491d83a7cb3SJosh Poimboeuf 249285dd3f61SPeter Zijlstra sched_core_fork(p); 249385dd3f61SPeter Zijlstra 24941da177e4SLinus Torvalds spin_lock(&current->sighand->siglock); 24954a2c7a78SOleg Nesterov 249679257534SDaniel Bristot de Oliveira rv_task_fork(p); 249779257534SDaniel Bristot de Oliveira 2498d7822b1eSMathieu Desnoyers rseq_fork(p, clone_flags); 2499d7822b1eSMathieu Desnoyers 25004ca1d3eeSEric W. Biederman /* Don't start children in a dying pid namespace */ 2501e8cfbc24SGargi Sharma if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { 25023fd37226SKirill Tkhai retval = -ENOMEM; 25033fd37226SKirill Tkhai goto bad_fork_cancel_cgroup; 25043fd37226SKirill Tkhai } 25054a2c7a78SOleg Nesterov 25067673bf55SEric W. Biederman /* Let kill terminate clone/fork in the middle */ 25077673bf55SEric W. Biederman if (fatal_signal_pending(current)) { 25087673bf55SEric W.
Biederman retval = -EINTR; 25097673bf55SEric W. Biederman goto bad_fork_cancel_cgroup; 25107673bf55SEric W. Biederman } 25117673bf55SEric W. Biederman 2512a1140cb2SKuniyuki Iwashima /* No more failure paths after this point. */ 2513a1140cb2SKuniyuki Iwashima 2514a1140cb2SKuniyuki Iwashima /* 2515a1140cb2SKuniyuki Iwashima * Copy seccomp details explicitly here, in case they were changed 2516a1140cb2SKuniyuki Iwashima * before holding sighand lock. 2517a1140cb2SKuniyuki Iwashima */ 2518a1140cb2SKuniyuki Iwashima copy_seccomp(p); 2519a1140cb2SKuniyuki Iwashima 25202c470475SEric W. Biederman init_task_pid_links(p); 252173b9ebfeSOleg Nesterov if (likely(p->pid)) { 25224b9d33e6STejun Heo ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); 25231da177e4SLinus Torvalds 252481907739SOleg Nesterov init_task_pid(p, PIDTYPE_PID, pid); 25251da177e4SLinus Torvalds if (thread_group_leader(p)) { 25266883f81aSEric W. Biederman init_task_pid(p, PIDTYPE_TGID, pid); 252781907739SOleg Nesterov init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); 252881907739SOleg Nesterov init_task_pid(p, PIDTYPE_SID, task_session(current)); 252981907739SOleg Nesterov 25301c4042c2SEric W. Biederman if (is_child_reaper(pid)) { 253117cf22c3SEric W. Biederman ns_of_pid(pid)->child_reaper = p; 25321c4042c2SEric W. Biederman p->signal->flags |= SIGNAL_UNKILLABLE; 25331c4042c2SEric W. Biederman } 2534c3ad2c3bSEric W. Biederman p->signal->shared_pending.signal = delayed.signal; 25359c9f4dedSAlan Cox p->signal->tty = tty_kref_get(current->signal->tty); 2536749860ceSPavel Tikhomirov /* 2537749860ceSPavel Tikhomirov * Inherit has_child_subreaper flag under the same 2538749860ceSPavel Tikhomirov * tasklist_lock with adding child to the process tree 2539749860ceSPavel Tikhomirov * for propagate_has_child_subreaper optimization. 2540749860ceSPavel Tikhomirov */ 2541749860ceSPavel Tikhomirov p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || 2542749860ceSPavel Tikhomirov p->real_parent->signal->is_child_subreaper; 25439cd80bbbSOleg Nesterov list_add_tail(&p->sibling, &p->real_parent->children); 25445e85d4abSEric W. Biederman list_add_tail_rcu(&p->tasks, &init_task.tasks); 25456883f81aSEric W. Biederman attach_pid(p, PIDTYPE_TGID); 254681907739SOleg Nesterov attach_pid(p, PIDTYPE_PGID); 254781907739SOleg Nesterov attach_pid(p, PIDTYPE_SID); 2548909ea964SChristoph Lameter __this_cpu_inc(process_counts); 254980628ca0SOleg Nesterov } else { 255080628ca0SOleg Nesterov current->signal->nr_threads++; 2551d80f7d7bSEric W. Biederman current->signal->quick_threads++; 255280628ca0SOleg Nesterov atomic_inc(&current->signal->live); 255360d4de3fSElena Reshetova refcount_inc(&current->signal->sigcnt); 2554924de3b8SEric W. Biederman task_join_group_stop(p); 255580628ca0SOleg Nesterov list_add_tail_rcu(&p->thread_group, 255680628ca0SOleg Nesterov &p->group_leader->thread_group); 25570c740d0aSOleg Nesterov list_add_tail_rcu(&p->thread_node, 25580c740d0aSOleg Nesterov &p->signal->thread_head); 25591da177e4SLinus Torvalds } 256081907739SOleg Nesterov attach_pid(p, PIDTYPE_PID); 25611da177e4SLinus Torvalds nr_threads++; 256273b9ebfeSOleg Nesterov } 25631da177e4SLinus Torvalds total_forks++; 2564c3ad2c3bSEric W.
Biederman hlist_del_init(&delayed.node); 25653f17da69SOleg Nesterov spin_unlock(&current->sighand->siglock); 25664af4206bSOleg Nesterov syscall_tracepoint_update(p); 25671da177e4SLinus Torvalds write_unlock_irq(&tasklist_lock); 25684af4206bSOleg Nesterov 2569ddc204b5SWaiman Long if (pidfile) 2570ddc204b5SWaiman Long fd_install(pidfd, pidfile); 2571ddc204b5SWaiman Long 2572c13cf856SAndrew Morton proc_fork_connector(p); 2573b1e82065SPeter Zijlstra sched_post_fork(p); 2574ef2c41cfSChristian Brauner cgroup_post_fork(p, args); 2575cdd6c482SIngo Molnar perf_event_fork(p); 257643d2b113SKAMEZAWA Hiroyuki 257743d2b113SKAMEZAWA Hiroyuki trace_task_newtask(p, clone_flags); 25783ab67966SOleg Nesterov uprobe_copy_process(p, clone_flags); 257943d2b113SKAMEZAWA Hiroyuki 258067197a4fSSuren Baghdasaryan copy_oom_score_adj(clone_flags, p); 258167197a4fSSuren Baghdasaryan 25821da177e4SLinus Torvalds return p; 25831da177e4SLinus Torvalds 25847e47682eSAleksa Sarai bad_fork_cancel_cgroup: 258585dd3f61SPeter Zijlstra sched_core_free(p); 25863fd37226SKirill Tkhai spin_unlock(&current->sighand->siglock); 25873fd37226SKirill Tkhai write_unlock_irq(&tasklist_lock); 2588ef2c41cfSChristian Brauner cgroup_cancel_fork(p, args); 2589b3e58382SChristian Brauner bad_fork_put_pidfd: 25906fd2fe49SAl Viro if (clone_flags & CLONE_PIDFD) { 25916fd2fe49SAl Viro fput(pidfile); 25926fd2fe49SAl Viro put_unused_fd(pidfd); 25936fd2fe49SAl Viro } 2594425fb2b4SPavel Emelyanov bad_fork_free_pid: 2595425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) 2596425fb2b4SPavel Emelyanov free_pid(pid); 25970740aa5fSJiri Slaby bad_fork_cleanup_thread: 25980740aa5fSJiri Slaby exit_thread(p); 2599fd0928dfSJens Axboe bad_fork_cleanup_io: 2600b69f2292SLouis Rilling if (p->io_context) 2601b69f2292SLouis Rilling exit_io_context(p); 2602ab516013SSerge E. Hallyn bad_fork_cleanup_namespaces: 2603444f378bSLinus Torvalds exit_task_namespaces(p); 26041da177e4SLinus Torvalds bad_fork_cleanup_mm: 2605c3f3ce04SAndrea Arcangeli if (p->mm) { 2606c3f3ce04SAndrea Arcangeli mm_clear_owner(p->mm, p); 26071da177e4SLinus Torvalds mmput(p->mm); 2608c3f3ce04SAndrea Arcangeli } 26091da177e4SLinus Torvalds bad_fork_cleanup_signal: 26104ab6c083SOleg Nesterov if (!(clone_flags & CLONE_THREAD)) 26111c5354deSMike Galbraith free_signal_struct(p->signal); 26121da177e4SLinus Torvalds bad_fork_cleanup_sighand: 2613a7e5328aSOleg Nesterov __cleanup_sighand(p->sighand); 26141da177e4SLinus Torvalds bad_fork_cleanup_fs: 26151da177e4SLinus Torvalds exit_fs(p); /* blocking */ 26161da177e4SLinus Torvalds bad_fork_cleanup_files: 26171da177e4SLinus Torvalds exit_files(p); /* blocking */ 26181da177e4SLinus Torvalds bad_fork_cleanup_semundo: 26191da177e4SLinus Torvalds exit_sem(p); 2620e4e55b47STetsuo Handa bad_fork_cleanup_security: 2621e4e55b47STetsuo Handa security_task_free(p); 26221da177e4SLinus Torvalds bad_fork_cleanup_audit: 26231da177e4SLinus Torvalds audit_free(p); 26246c72e350SPeter Zijlstra bad_fork_cleanup_perf: 2625cdd6c482SIngo Molnar perf_event_free_task(p); 26266c72e350SPeter Zijlstra bad_fork_cleanup_policy: 2627b09be676SByungchul Park lockdep_free_task(p); 26281da177e4SLinus Torvalds #ifdef CONFIG_NUMA 2629f0be3d32SLee Schermerhorn mpol_put(p->mempolicy); 26301da177e4SLinus Torvalds #endif 2631ff8288ffSEric W.
Biederman bad_fork_cleanup_delayacct: 263235df17c5SShailabh Nagar delayacct_tsk_free(p); 26331da177e4SLinus Torvalds bad_fork_cleanup_count: 263421d1c5e3SAlexey Gladkov dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1); 2635e0e81739SDavid Howells exit_creds(p); 26361da177e4SLinus Torvalds bad_fork_free: 26372f064a59SPeter Zijlstra WRITE_ONCE(p->__state, TASK_DEAD); 26381a03d3f1SSebastian Andrzej Siewior exit_task_stack_account(p); 263968f24b08SAndy Lutomirski put_task_stack(p); 2640c3f3ce04SAndrea Arcangeli delayed_free_task(p); 2641fe7d37d1SOleg Nesterov fork_out: 2642c3ad2c3bSEric W. Biederman spin_lock_irq(&current->sighand->siglock); 2643c3ad2c3bSEric W. Biederman hlist_del_init(&delayed.node); 2644c3ad2c3bSEric W. Biederman spin_unlock_irq(&current->sighand->siglock); 2645fe7d37d1SOleg Nesterov return ERR_PTR(retval); 26461da177e4SLinus Torvalds } 26471da177e4SLinus Torvalds 26482c470475SEric W. Biederman static inline void init_idle_pids(struct task_struct *idle) 2649f106eee1SOleg Nesterov { 2650f106eee1SOleg Nesterov enum pid_type type; 2651f106eee1SOleg Nesterov 2652f106eee1SOleg Nesterov for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { 26532c470475SEric W. Biederman INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */ 26542c470475SEric W. Biederman init_task_pid(idle, type, &init_struct_pid); 2655f106eee1SOleg Nesterov } 2656f106eee1SOleg Nesterov } 2657f106eee1SOleg Nesterov 265836cb0e1cSEric W. Biederman static int idle_dummy(void *dummy) 265936cb0e1cSEric W. Biederman { 266036cb0e1cSEric W. Biederman /* This function is never called */ 266136cb0e1cSEric W. Biederman return 0; 266236cb0e1cSEric W. Biederman } 266336cb0e1cSEric W. Biederman 2664f1a0a376SValentin Schneider struct task_struct * __init fork_idle(int cpu) 26651da177e4SLinus Torvalds { 266636c8b586SIngo Molnar struct task_struct *task; 26677f192e3cSChristian Brauner struct kernel_clone_args args = { 26687f192e3cSChristian Brauner .flags = CLONE_VM, 26695bd2e97cSEric W. Biederman .fn = &idle_dummy, 26705bd2e97cSEric W. Biederman .fn_arg = NULL, 2671343f4c49SEric W. Biederman .kthread = 1, 267236cb0e1cSEric W. Biederman .idle = 1, 26737f192e3cSChristian Brauner }; 26747f192e3cSChristian Brauner 26757f192e3cSChristian Brauner task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args); 2676f106eee1SOleg Nesterov if (!IS_ERR(task)) { 26772c470475SEric W. Biederman init_idle_pids(task); 26781da177e4SLinus Torvalds init_idle(task, cpu); 2679f106eee1SOleg Nesterov } 268073b9ebfeSOleg Nesterov 26811da177e4SLinus Torvalds return task; 26821da177e4SLinus Torvalds } 26831da177e4SLinus Torvalds 26841da177e4SLinus Torvalds /* 2685cc440e87SJens Axboe * This is like kernel_clone(), but shaved down and tailored to just 2686cc440e87SJens Axboe * creating io_uring workers. It returns a created task, or an error pointer. 2687cc440e87SJens Axboe * The returned task is inactive, and the caller must fire it up through 2688cc440e87SJens Axboe * wake_up_new_task(p). All signals are blocked in the created task.
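 *
 * A minimal usage sketch (illustrative only; error handling elided,
 * and worker_fn/data are placeholders for the caller's function and
 * argument):
 *
 *	tsk = create_io_thread(worker_fn, data, NUMA_NO_NODE);
 *	if (!IS_ERR(tsk))
 *		wake_up_new_task(tsk);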
2689cc440e87SJens Axboe */ 2690cc440e87SJens Axboe struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) 2691cc440e87SJens Axboe { 2692cc440e87SJens Axboe unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| 2693cc440e87SJens Axboe CLONE_IO; 2694cc440e87SJens Axboe struct kernel_clone_args args = { 2695cc440e87SJens Axboe .flags = ((lower_32_bits(flags) | CLONE_VM | 2696cc440e87SJens Axboe CLONE_UNTRACED) & ~CSIGNAL), 2697cc440e87SJens Axboe .exit_signal = (lower_32_bits(flags) & CSIGNAL), 26985bd2e97cSEric W. Biederman .fn = fn, 26995bd2e97cSEric W. Biederman .fn_arg = arg, 2700cc440e87SJens Axboe .io_thread = 1, 2701cc440e87SJens Axboe }; 2702cc440e87SJens Axboe 2703b16b3855SJens Axboe return copy_process(NULL, 0, node, &args); 2704cc440e87SJens Axboe } 2705cc440e87SJens Axboe 2706cc440e87SJens Axboe /* 27071da177e4SLinus Torvalds * Ok, this is the main fork-routine. 27081da177e4SLinus Torvalds * 27091da177e4SLinus Torvalds * It copies the process, and if successful kick-starts 27101da177e4SLinus Torvalds * it and waits for it to finish using the VM if required. 2711a0eb9abdSEugene Syromiatnikov * 2712a0eb9abdSEugene Syromiatnikov * args->exit_signal is expected to be checked for sanity by the caller. 27131da177e4SLinus Torvalds */ 2714cad6967aSChristian Brauner pid_t kernel_clone(struct kernel_clone_args *args) 27151da177e4SLinus Torvalds { 27167f192e3cSChristian Brauner u64 clone_flags = args->flags; 27179f5325aaSMarcos Paulo de Souza struct completion vfork; 27189f5325aaSMarcos Paulo de Souza struct pid *pid; 27191da177e4SLinus Torvalds struct task_struct *p; 27201da177e4SLinus Torvalds int trace = 0; 2721cad6967aSChristian Brauner pid_t nr; 27221da177e4SLinus Torvalds 2723bdff746aSAndrew Morton /* 27243af8588cSChristian Brauner * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument 27253af8588cSChristian Brauner * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are 27263af8588cSChristian Brauner * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate 27273af8588cSChristian Brauner * field in struct clone_args and it still doesn't make sense to have 27283af8588cSChristian Brauner * them both point at the same memory location. Performing this check 27293af8588cSChristian Brauner * here has the advantage that we don't need to have a separate helper 27303af8588cSChristian Brauner * to check for legacy clone(). 27313af8588cSChristian Brauner */ 27323af8588cSChristian Brauner if ((args->flags & CLONE_PIDFD) && 27333af8588cSChristian Brauner (args->flags & CLONE_PARENT_SETTID) && 27343af8588cSChristian Brauner (args->pidfd == args->parent_tid)) 27353af8588cSChristian Brauner return -EINVAL; 27363af8588cSChristian Brauner 27373af8588cSChristian Brauner /* 27384b9d33e6STejun Heo * Determine whether and which event to report to ptracer. When 27394b9d33e6STejun Heo * called from kernel_thread or CLONE_UNTRACED is explicitly 27404b9d33e6STejun Heo * requested, no event is reported; otherwise, report if the event 27414b9d33e6STejun Heo * for the type of forking is enabled. 
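 *
 * For example: fork() (exit_signal == SIGCHLD) reports
 * PTRACE_EVENT_FORK, vfork() reports PTRACE_EVENT_VFORK, and a clone
 * with any other exit_signal (e.g. a pthread-style clone) reports
 * PTRACE_EVENT_CLONE - in each case only if the tracer enabled the
 * matching ptrace event option.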
274209a05394SRoland McGrath */ 2743e80d6661SAl Viro if (!(clone_flags & CLONE_UNTRACED)) { 27444b9d33e6STejun Heo if (clone_flags & CLONE_VFORK) 27454b9d33e6STejun Heo trace = PTRACE_EVENT_VFORK; 27467f192e3cSChristian Brauner else if (args->exit_signal != SIGCHLD) 27474b9d33e6STejun Heo trace = PTRACE_EVENT_CLONE; 27484b9d33e6STejun Heo else 27494b9d33e6STejun Heo trace = PTRACE_EVENT_FORK; 27504b9d33e6STejun Heo 27514b9d33e6STejun Heo if (likely(!ptrace_event_enabled(current, trace))) 27524b9d33e6STejun Heo trace = 0; 27534b9d33e6STejun Heo } 27541da177e4SLinus Torvalds 27557f192e3cSChristian Brauner p = copy_process(NULL, trace, NUMA_NO_NODE, args); 275638addce8SEmese Revfy add_latent_entropy(); 27579f5325aaSMarcos Paulo de Souza 27589f5325aaSMarcos Paulo de Souza if (IS_ERR(p)) 27599f5325aaSMarcos Paulo de Souza return PTR_ERR(p); 27609f5325aaSMarcos Paulo de Souza 27611da177e4SLinus Torvalds /* 27621da177e4SLinus Torvalds * Do this prior to waking up the new thread - the thread pointer 27631da177e4SLinus Torvalds * might get invalid after that point, if the thread exits quickly. 27641da177e4SLinus Torvalds */ 27650a16b607SMathieu Desnoyers trace_sched_process_fork(current, p); 27660a16b607SMathieu Desnoyers 27674e52365fSMatthew Dempsky pid = get_task_pid(p, PIDTYPE_PID); 27684e52365fSMatthew Dempsky nr = pid_vnr(pid); 276930e49c26SPavel Emelyanov 277030e49c26SPavel Emelyanov if (clone_flags & CLONE_PARENT_SETTID) 27717f192e3cSChristian Brauner put_user(nr, args->parent_tid); 2772a6f5e063SSukadev Bhattiprolu 27731da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) { 27741da177e4SLinus Torvalds p->vfork_done = &vfork; 27751da177e4SLinus Torvalds init_completion(&vfork); 2776d68b46feSOleg Nesterov get_task_struct(p); 27771da177e4SLinus Torvalds } 27781da177e4SLinus Torvalds 2779bd74fdaeSYu Zhao if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) { 2780bd74fdaeSYu Zhao /* lock the task to synchronize with memcg migration */ 2781bd74fdaeSYu Zhao task_lock(p); 2782bd74fdaeSYu Zhao lru_gen_add_mm(p->mm); 2783bd74fdaeSYu Zhao task_unlock(p); 2784bd74fdaeSYu Zhao } 2785bd74fdaeSYu Zhao 27863e51e3edSSamir Bellabes wake_up_new_task(p); 27871da177e4SLinus Torvalds 27884b9d33e6STejun Heo /* forking complete and child started to run, tell ptracer */ 27894b9d33e6STejun Heo if (unlikely(trace)) 27904e52365fSMatthew Dempsky ptrace_event_pid(trace, pid); 279109a05394SRoland McGrath 27921da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) { 2793d68b46feSOleg Nesterov if (!wait_for_vfork_done(p, &vfork)) 27944e52365fSMatthew Dempsky ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); 27959f59ce5dSChuck Ebbert } 27964e52365fSMatthew Dempsky 27974e52365fSMatthew Dempsky put_pid(pid); 279892476d7fSEric W. Biederman return nr; 27991da177e4SLinus Torvalds } 28001da177e4SLinus Torvalds 28012aa3a7f8SAl Viro /* 28022aa3a7f8SAl Viro * Create a kernel thread. 28032aa3a7f8SAl Viro */ 28042aa3a7f8SAl Viro pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 28052aa3a7f8SAl Viro { 28067f192e3cSChristian Brauner struct kernel_clone_args args = { 28073f2c788aSChristian Brauner .flags = ((lower_32_bits(flags) | CLONE_VM | 28083f2c788aSChristian Brauner CLONE_UNTRACED) & ~CSIGNAL), 28093f2c788aSChristian Brauner .exit_signal = (lower_32_bits(flags) & CSIGNAL), 28105bd2e97cSEric W. Biederman .fn = fn, 28115bd2e97cSEric W. Biederman .fn_arg = arg, 2812343f4c49SEric W. Biederman .kthread = 1, 2813343f4c49SEric W. Biederman }; 2814343f4c49SEric W. Biederman 2815343f4c49SEric W.
Biederman return kernel_clone(&args); 2816343f4c49SEric W. Biederman } 2817343f4c49SEric W. Biederman 2818343f4c49SEric W. Biederman /* 2819343f4c49SEric W. Biederman * Create a user mode thread. 2820343f4c49SEric W. Biederman */ 2821343f4c49SEric W. Biederman pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags) 2822343f4c49SEric W. Biederman { 2823343f4c49SEric W. Biederman struct kernel_clone_args args = { 2824343f4c49SEric W. Biederman .flags = ((lower_32_bits(flags) | CLONE_VM | 2825343f4c49SEric W. Biederman CLONE_UNTRACED) & ~CSIGNAL), 2826343f4c49SEric W. Biederman .exit_signal = (lower_32_bits(flags) & CSIGNAL), 28275bd2e97cSEric W. Biederman .fn = fn, 28285bd2e97cSEric W. Biederman .fn_arg = arg, 28297f192e3cSChristian Brauner }; 28307f192e3cSChristian Brauner 2831cad6967aSChristian Brauner return kernel_clone(&args); 28322aa3a7f8SAl Viro } 28332aa3a7f8SAl Viro 2834d2125043SAl Viro #ifdef __ARCH_WANT_SYS_FORK 2835d2125043SAl Viro SYSCALL_DEFINE0(fork) 2836d2125043SAl Viro { 2837d2125043SAl Viro #ifdef CONFIG_MMU 28387f192e3cSChristian Brauner struct kernel_clone_args args = { 28397f192e3cSChristian Brauner .exit_signal = SIGCHLD, 28407f192e3cSChristian Brauner }; 28417f192e3cSChristian Brauner 2842cad6967aSChristian Brauner return kernel_clone(&args); 2843d2125043SAl Viro #else 2844d2125043SAl Viro /* cannot support in nommu mode */ 28455d59e182SDaeseok Youn return -EINVAL; 2846d2125043SAl Viro #endif 2847d2125043SAl Viro } 2848d2125043SAl Viro #endif 2849d2125043SAl Viro 2850d2125043SAl Viro #ifdef __ARCH_WANT_SYS_VFORK 2851d2125043SAl Viro SYSCALL_DEFINE0(vfork) 2852d2125043SAl Viro { 28537f192e3cSChristian Brauner struct kernel_clone_args args = { 28547f192e3cSChristian Brauner .flags = CLONE_VFORK | CLONE_VM, 28557f192e3cSChristian Brauner .exit_signal = SIGCHLD, 28567f192e3cSChristian Brauner }; 28577f192e3cSChristian Brauner 2858cad6967aSChristian Brauner return kernel_clone(&args); 2859d2125043SAl Viro } 2860d2125043SAl Viro #endif 2861d2125043SAl Viro 2862d2125043SAl Viro #ifdef __ARCH_WANT_SYS_CLONE 2863d2125043SAl Viro #ifdef CONFIG_CLONE_BACKWARDS 2864d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2865d2125043SAl Viro int __user *, parent_tidptr, 28663033f14aSJosh Triplett unsigned long, tls, 2867d2125043SAl Viro int __user *, child_tidptr) 2868d2125043SAl Viro #elif defined(CONFIG_CLONE_BACKWARDS2) 2869d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, 2870d2125043SAl Viro int __user *, parent_tidptr, 2871d2125043SAl Viro int __user *, child_tidptr, 28723033f14aSJosh Triplett unsigned long, tls) 2873dfa9771aSMichal Simek #elif defined(CONFIG_CLONE_BACKWARDS3) 2874dfa9771aSMichal Simek SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, 2875dfa9771aSMichal Simek int, stack_size, 2876dfa9771aSMichal Simek int __user *, parent_tidptr, 2877dfa9771aSMichal Simek int __user *, child_tidptr, 28783033f14aSJosh Triplett unsigned long, tls) 2879d2125043SAl Viro #else 2880d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2881d2125043SAl Viro int __user *, parent_tidptr, 2882d2125043SAl Viro int __user *, child_tidptr, 28833033f14aSJosh Triplett unsigned long, tls) 2884d2125043SAl Viro #endif 2885d2125043SAl Viro { 28867f192e3cSChristian Brauner struct kernel_clone_args args = { 28873f2c788aSChristian Brauner .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), 28887f192e3cSChristian Brauner .pidfd = parent_tidptr,
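		/* legacy clone(): CLONE_PIDFD returns the pidfd via parent_tid */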
28897f192e3cSChristian Brauner .child_tid = child_tidptr, 28907f192e3cSChristian Brauner .parent_tid = parent_tidptr, 28913f2c788aSChristian Brauner .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), 28927f192e3cSChristian Brauner .stack = newsp, 28937f192e3cSChristian Brauner .tls = tls, 28947f192e3cSChristian Brauner }; 28957f192e3cSChristian Brauner 2896cad6967aSChristian Brauner return kernel_clone(&args); 28977f192e3cSChristian Brauner } 2898d68dbb0cSChristian Brauner #endif 28997f192e3cSChristian Brauner 2900d68dbb0cSChristian Brauner #ifdef __ARCH_WANT_SYS_CLONE3 2901dd499f7aSAmanieu d'Antras 29027f192e3cSChristian Brauner noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, 29037f192e3cSChristian Brauner struct clone_args __user *uargs, 2904f14c234bSAleksa Sarai size_t usize) 29057f192e3cSChristian Brauner { 2906f14c234bSAleksa Sarai int err; 29077f192e3cSChristian Brauner struct clone_args args; 290849cb2fc4SAdrian Reber pid_t *kset_tid = kargs->set_tid; 29097f192e3cSChristian Brauner 2910a966dcfeSEugene Syromiatnikov BUILD_BUG_ON(offsetofend(struct clone_args, tls) != 2911a966dcfeSEugene Syromiatnikov CLONE_ARGS_SIZE_VER0); 2912a966dcfeSEugene Syromiatnikov BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) != 2913a966dcfeSEugene Syromiatnikov CLONE_ARGS_SIZE_VER1); 2914a966dcfeSEugene Syromiatnikov BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) != 2915a966dcfeSEugene Syromiatnikov CLONE_ARGS_SIZE_VER2); 2916a966dcfeSEugene Syromiatnikov BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2); 2917a966dcfeSEugene Syromiatnikov 2918f14c234bSAleksa Sarai if (unlikely(usize > PAGE_SIZE)) 29197f192e3cSChristian Brauner return -E2BIG; 2920f14c234bSAleksa Sarai if (unlikely(usize < CLONE_ARGS_SIZE_VER0)) 29217f192e3cSChristian Brauner return -EINVAL; 29227f192e3cSChristian Brauner 2923f14c234bSAleksa Sarai err = copy_struct_from_user(&args, sizeof(args), uargs, usize); 2924f14c234bSAleksa Sarai if (err) 2925f14c234bSAleksa Sarai return err; 29267f192e3cSChristian Brauner 292749cb2fc4SAdrian Reber if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL)) 292849cb2fc4SAdrian Reber return -EINVAL; 292949cb2fc4SAdrian Reber 293049cb2fc4SAdrian Reber if (unlikely(!args.set_tid && args.set_tid_size > 0)) 293149cb2fc4SAdrian Reber return -EINVAL; 293249cb2fc4SAdrian Reber 293349cb2fc4SAdrian Reber if (unlikely(args.set_tid && args.set_tid_size == 0)) 293449cb2fc4SAdrian Reber return -EINVAL; 293549cb2fc4SAdrian Reber 2936a0eb9abdSEugene Syromiatnikov /* 2937a0eb9abdSEugene Syromiatnikov * Verify that higher 32bits of exit_signal are unset and that 2938a0eb9abdSEugene Syromiatnikov * it is a valid signal 2939a0eb9abdSEugene Syromiatnikov */ 2940a0eb9abdSEugene Syromiatnikov if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) || 2941a0eb9abdSEugene Syromiatnikov !valid_signal(args.exit_signal))) 2942a0eb9abdSEugene Syromiatnikov return -EINVAL; 2943a0eb9abdSEugene Syromiatnikov 294462173872SEugene Syromiatnikov if ((args.flags & CLONE_INTO_CGROUP) && 294562173872SEugene Syromiatnikov (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2)) 2946ef2c41cfSChristian Brauner return -EINVAL; 2947ef2c41cfSChristian Brauner 29487f192e3cSChristian Brauner *kargs = (struct kernel_clone_args){ 29497f192e3cSChristian Brauner .flags = args.flags, 29507f192e3cSChristian Brauner .pidfd = u64_to_user_ptr(args.pidfd), 29517f192e3cSChristian Brauner .child_tid = u64_to_user_ptr(args.child_tid), 29527f192e3cSChristian Brauner .parent_tid = 
u64_to_user_ptr(args.parent_tid), 29537f192e3cSChristian Brauner .exit_signal = args.exit_signal, 29547f192e3cSChristian Brauner .stack = args.stack, 29557f192e3cSChristian Brauner .stack_size = args.stack_size, 29567f192e3cSChristian Brauner .tls = args.tls, 295749cb2fc4SAdrian Reber .set_tid_size = args.set_tid_size, 2958ef2c41cfSChristian Brauner .cgroup = args.cgroup, 29597f192e3cSChristian Brauner }; 29607f192e3cSChristian Brauner 296149cb2fc4SAdrian Reber if (args.set_tid && 296249cb2fc4SAdrian Reber copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid), 296349cb2fc4SAdrian Reber (kargs->set_tid_size * sizeof(pid_t)))) 296449cb2fc4SAdrian Reber return -EFAULT; 296549cb2fc4SAdrian Reber 296649cb2fc4SAdrian Reber kargs->set_tid = kset_tid; 296749cb2fc4SAdrian Reber 29687f192e3cSChristian Brauner return 0; 29697f192e3cSChristian Brauner } 29707f192e3cSChristian Brauner 2971fa729c4dSChristian Brauner /** 2972fa729c4dSChristian Brauner * clone3_stack_valid - check and prepare stack 2973fa729c4dSChristian Brauner * @kargs: kernel clone args 2974fa729c4dSChristian Brauner * 2975fa729c4dSChristian Brauner * Verify that the stack arguments userspace gave us are sane. 2976fa729c4dSChristian Brauner * In addition, set the stack direction for userspace since it's easy for us to 2977fa729c4dSChristian Brauner * determine. 2978fa729c4dSChristian Brauner */ 2979fa729c4dSChristian Brauner static inline bool clone3_stack_valid(struct kernel_clone_args *kargs) 2980fa729c4dSChristian Brauner { 2981fa729c4dSChristian Brauner if (kargs->stack == 0) { 2982fa729c4dSChristian Brauner if (kargs->stack_size > 0) 2983fa729c4dSChristian Brauner return false; 2984fa729c4dSChristian Brauner } else { 2985fa729c4dSChristian Brauner if (kargs->stack_size == 0) 2986fa729c4dSChristian Brauner return false; 2987fa729c4dSChristian Brauner 2988fa729c4dSChristian Brauner if (!access_ok((void __user *)kargs->stack, kargs->stack_size)) 2989fa729c4dSChristian Brauner return false; 2990fa729c4dSChristian Brauner 2991fa729c4dSChristian Brauner #if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64) 2992fa729c4dSChristian Brauner kargs->stack += kargs->stack_size; 2993fa729c4dSChristian Brauner #endif 2994fa729c4dSChristian Brauner } 2995fa729c4dSChristian Brauner 2996fa729c4dSChristian Brauner return true; 2997fa729c4dSChristian Brauner } 2998fa729c4dSChristian Brauner 2999fa729c4dSChristian Brauner static bool clone3_args_valid(struct kernel_clone_args *kargs) 30007f192e3cSChristian Brauner { 3001b612e5dfSChristian Brauner /* Verify that no unknown flags are passed along. 
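 * (CLONE_LEGACY_FLAGS covers, roughly, the flag bits legacy clone()
 * could pass; CLONE_CLEAR_SIGHAND and CLONE_INTO_CGROUP are the
 * clone3()-only additions.)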
*/ 3002ef2c41cfSChristian Brauner if (kargs->flags & 3003ef2c41cfSChristian Brauner ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP)) 30047f192e3cSChristian Brauner return false; 30057f192e3cSChristian Brauner 30067f192e3cSChristian Brauner /* 3007a8ca6b13SXiaofeng Cao * - make the CLONE_DETACHED bit reusable for clone3 3008a8ca6b13SXiaofeng Cao * - make the CSIGNAL bits reusable for clone3 30097f192e3cSChristian Brauner */ 3010a402f1e3STobias Klauser if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME)))) 30117f192e3cSChristian Brauner return false; 30127f192e3cSChristian Brauner 3013b612e5dfSChristian Brauner if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) == 3014b612e5dfSChristian Brauner (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) 3015b612e5dfSChristian Brauner return false; 3016b612e5dfSChristian Brauner 30177f192e3cSChristian Brauner if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) && 30187f192e3cSChristian Brauner kargs->exit_signal) 30197f192e3cSChristian Brauner return false; 30207f192e3cSChristian Brauner 3021fa729c4dSChristian Brauner if (!clone3_stack_valid(kargs)) 3022fa729c4dSChristian Brauner return false; 3023fa729c4dSChristian Brauner 30247f192e3cSChristian Brauner return true; 30257f192e3cSChristian Brauner } 30267f192e3cSChristian Brauner 3027501bd016SChristian Brauner /** 3028501bd016SChristian Brauner * clone3 - create a new process with specific properties 3029501bd016SChristian Brauner * @uargs: argument structure 3030501bd016SChristian Brauner * @size: size of @uargs 3031501bd016SChristian Brauner * 3032501bd016SChristian Brauner * clone3() is the extensible successor to clone()/clone2(). 3033501bd016SChristian Brauner * It takes a struct as argument that is versioned by its size. 3034501bd016SChristian Brauner * 3035501bd016SChristian Brauner * Return: On success, a positive PID for the child process. 3036501bd016SChristian Brauner * On error, a negative errno number. 
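 *
 * A minimal userspace sketch (illustrative; raw syscall(2), since libc
 * may not provide a wrapper, and pidfd is an int supplied by the
 * caller):
 *
 *	struct clone_args args = {
 *		.flags = CLONE_PIDFD,
 *		.pidfd = (__u64)(uintptr_t)&pidfd,
 *		.exit_signal = SIGCHLD,
 *	};
 *	pid_t child = syscall(__NR_clone3, &args, sizeof(args));
 *	// child == 0 in the child; in the parent, pidfd refers to it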
3037501bd016SChristian Brauner */ 30387f192e3cSChristian Brauner SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size) 30397f192e3cSChristian Brauner { 30407f192e3cSChristian Brauner int err; 30417f192e3cSChristian Brauner 30427f192e3cSChristian Brauner struct kernel_clone_args kargs; 304349cb2fc4SAdrian Reber pid_t set_tid[MAX_PID_NS_LEVEL]; 304449cb2fc4SAdrian Reber 304549cb2fc4SAdrian Reber kargs.set_tid = set_tid; 30467f192e3cSChristian Brauner 30477f192e3cSChristian Brauner err = copy_clone_args_from_user(&kargs, uargs, size); 30487f192e3cSChristian Brauner if (err) 30497f192e3cSChristian Brauner return err; 30507f192e3cSChristian Brauner 30517f192e3cSChristian Brauner if (!clone3_args_valid(&kargs)) 30527f192e3cSChristian Brauner return -EINVAL; 30537f192e3cSChristian Brauner 3054cad6967aSChristian Brauner return kernel_clone(&kargs); 3055d2125043SAl Viro } 3056d2125043SAl Viro #endif 3057d2125043SAl Viro 30580f1b92cbSOleg Nesterov void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) 30590f1b92cbSOleg Nesterov { 30600f1b92cbSOleg Nesterov struct task_struct *leader, *parent, *child; 30610f1b92cbSOleg Nesterov int res; 30620f1b92cbSOleg Nesterov 30630f1b92cbSOleg Nesterov read_lock(&tasklist_lock); 30640f1b92cbSOleg Nesterov leader = top = top->group_leader; 30650f1b92cbSOleg Nesterov down: 30660f1b92cbSOleg Nesterov for_each_thread(leader, parent) { 30670f1b92cbSOleg Nesterov list_for_each_entry(child, &parent->children, sibling) { 30680f1b92cbSOleg Nesterov res = visitor(child, data); 30690f1b92cbSOleg Nesterov if (res) { 30700f1b92cbSOleg Nesterov if (res < 0) 30710f1b92cbSOleg Nesterov goto out; 30720f1b92cbSOleg Nesterov leader = child; 30730f1b92cbSOleg Nesterov goto down; 30740f1b92cbSOleg Nesterov } 30750f1b92cbSOleg Nesterov up: 30760f1b92cbSOleg Nesterov ; 30770f1b92cbSOleg Nesterov } 30780f1b92cbSOleg Nesterov } 30790f1b92cbSOleg Nesterov 30800f1b92cbSOleg Nesterov if (leader != top) { 30810f1b92cbSOleg Nesterov child = leader; 30820f1b92cbSOleg Nesterov parent = child->real_parent; 30830f1b92cbSOleg Nesterov leader = parent->group_leader; 30840f1b92cbSOleg Nesterov goto up; 30850f1b92cbSOleg Nesterov } 30860f1b92cbSOleg Nesterov out: 30870f1b92cbSOleg Nesterov read_unlock(&tasklist_lock); 30880f1b92cbSOleg Nesterov } 30890f1b92cbSOleg Nesterov 30905fd63b30SRavikiran G Thirumalai #ifndef ARCH_MIN_MMSTRUCT_ALIGN 30915fd63b30SRavikiran G Thirumalai #define ARCH_MIN_MMSTRUCT_ALIGN 0 30925fd63b30SRavikiran G Thirumalai #endif 30935fd63b30SRavikiran G Thirumalai 309451cc5068SAlexey Dobriyan static void sighand_ctor(void *data) 3095aa1757f9SOleg Nesterov { 3096aa1757f9SOleg Nesterov struct sighand_struct *sighand = data; 3097aa1757f9SOleg Nesterov 3098aa1757f9SOleg Nesterov spin_lock_init(&sighand->siglock); 3099b8fceee1SDavide Libenzi init_waitqueue_head(&sighand->signalfd_wqh); 3100fba2afaaSDavide Libenzi } 3101aa1757f9SOleg Nesterov 3102af806027SPeter Zijlstra void __init mm_cache_init(void) 31031da177e4SLinus Torvalds { 3104c1a2f7f0SRik van Riel unsigned int mm_size; 3105c1a2f7f0SRik van Riel 3106af806027SPeter Zijlstra /* 3107af806027SPeter Zijlstra * The mm_cpumask is located at the end of mm_struct, and is 3108af806027SPeter Zijlstra * dynamically sized based on the maximum CPU number this system 3109af806027SPeter Zijlstra * can have, taking hotplug into account (nr_cpu_ids). 
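 * The slab object size is therefore computed below as
 *
 *	sizeof(struct mm_struct) + cpumask_size() + mm_cid_size()
 *
 * rather than taken from the static type size alone.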
3110af806027SPeter Zijlstra */ 3111af7f588dSMathieu Desnoyers mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size(); 3112af806027SPeter Zijlstra 3113af806027SPeter Zijlstra mm_cachep = kmem_cache_create_usercopy("mm_struct", 3114af806027SPeter Zijlstra mm_size, ARCH_MIN_MMSTRUCT_ALIGN, 3115af806027SPeter Zijlstra SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 3116af806027SPeter Zijlstra offsetof(struct mm_struct, saved_auxv), 3117af806027SPeter Zijlstra sizeof_field(struct mm_struct, saved_auxv), 3118af806027SPeter Zijlstra NULL); 3119af806027SPeter Zijlstra } 3120af806027SPeter Zijlstra 3121af806027SPeter Zijlstra void __init proc_caches_init(void) 3122af806027SPeter Zijlstra { 31231da177e4SLinus Torvalds sighand_cachep = kmem_cache_create("sighand_cache", 31241da177e4SLinus Torvalds sizeof(struct sighand_struct), 0, 31255f0d5a3aSPaul E. McKenney SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| 312675f296d9SLevin, Alexander (Sasha Levin) SLAB_ACCOUNT, sighand_ctor); 31271da177e4SLinus Torvalds signal_cachep = kmem_cache_create("signal_cache", 31281da177e4SLinus Torvalds sizeof(struct signal_struct), 0, 312975f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 31305d097056SVladimir Davydov NULL); 31311da177e4SLinus Torvalds files_cachep = kmem_cache_create("files_cache", 31321da177e4SLinus Torvalds sizeof(struct files_struct), 0, 313375f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 31345d097056SVladimir Davydov NULL); 31351da177e4SLinus Torvalds fs_cachep = kmem_cache_create("fs_cache", 31361da177e4SLinus Torvalds sizeof(struct fs_struct), 0, 313775f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 31385d097056SVladimir Davydov NULL); 3139c1a2f7f0SRik van Riel 31405d097056SVladimir Davydov vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); 31418feae131SDavid Howells mmap_init(); 314266577193SAl Viro nsproxy_cache_init(); 31431da177e4SLinus Torvalds } 3144cf2e340fSJANAK DESAI 3145cf2e340fSJANAK DESAI /* 31469bfb23fcSOleg Nesterov * Check constraints on flags passed to the unshare system call. 3147cf2e340fSJANAK DESAI */ 31489bfb23fcSOleg Nesterov static int check_unshare_flags(unsigned long unshare_flags) 3149cf2e340fSJANAK DESAI { 31509bfb23fcSOleg Nesterov if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| 31519bfb23fcSOleg Nesterov CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| 315250804fe3SEric W. Biederman CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| 3153769071acSAndrei Vagin CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP| 3154769071acSAndrei Vagin CLONE_NEWTIME)) 3155cf2e340fSJANAK DESAI return -EINVAL; 31569bfb23fcSOleg Nesterov /* 315712c641abSEric W. Biederman * Not implemented, but pretend it works if there is nothing 315812c641abSEric W. Biederman * to unshare. Note that unsharing the address space or the 315912c641abSEric W. Biederman * signal handlers also need to unshare the signal queues (aka 316012c641abSEric W. Biederman * CLONE_THREAD). 31619bfb23fcSOleg Nesterov */ 31629bfb23fcSOleg Nesterov if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { 316312c641abSEric W. Biederman if (!thread_group_empty(current)) 316412c641abSEric W. Biederman return -EINVAL; 316512c641abSEric W. Biederman } 316612c641abSEric W. Biederman if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { 3167d036bda7SElena Reshetova if (refcount_read(¤t->sighand->count) > 1) 316812c641abSEric W. Biederman return -EINVAL; 316912c641abSEric W. 
Biederman } 317012c641abSEric W. Biederman if (unshare_flags & CLONE_VM) { 317112c641abSEric W. Biederman if (!current_is_single_threaded()) 31729bfb23fcSOleg Nesterov return -EINVAL; 31739bfb23fcSOleg Nesterov } 3174cf2e340fSJANAK DESAI 3175cf2e340fSJANAK DESAI return 0; 3176cf2e340fSJANAK DESAI } 3177cf2e340fSJANAK DESAI 3178cf2e340fSJANAK DESAI /* 317999d1419dSJANAK DESAI * Unshare the filesystem structure if it is being shared 3180cf2e340fSJANAK DESAI */ 3181cf2e340fSJANAK DESAI static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) 3182cf2e340fSJANAK DESAI { 3183cf2e340fSJANAK DESAI struct fs_struct *fs = current->fs; 3184cf2e340fSJANAK DESAI 3185498052bbSAl Viro if (!(unshare_flags & CLONE_FS) || !fs) 3186498052bbSAl Viro return 0; 3187498052bbSAl Viro 3188498052bbSAl Viro /* don't need lock here; in the worst case we'll do useless copy */ 3189498052bbSAl Viro if (fs->users == 1) 3190498052bbSAl Viro return 0; 3191498052bbSAl Viro 3192498052bbSAl Viro *new_fsp = copy_fs_struct(fs); 319399d1419dSJANAK DESAI if (!*new_fsp) 319499d1419dSJANAK DESAI return -ENOMEM; 3195cf2e340fSJANAK DESAI 3196cf2e340fSJANAK DESAI return 0; 3197cf2e340fSJANAK DESAI } 3198cf2e340fSJANAK DESAI 3199cf2e340fSJANAK DESAI /* 3200a016f338SJANAK DESAI * Unshare file descriptor table if it is being shared 3201cf2e340fSJANAK DESAI */ 320260997c3dSChristian Brauner int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, 320360997c3dSChristian Brauner struct files_struct **new_fdp) 3204cf2e340fSJANAK DESAI { 3205cf2e340fSJANAK DESAI struct files_struct *fd = current->files; 3206a016f338SJANAK DESAI int error = 0; 3207cf2e340fSJANAK DESAI 3208cf2e340fSJANAK DESAI if ((unshare_flags & CLONE_FILES) && 3209a016f338SJANAK DESAI (fd && atomic_read(&fd->count) > 1)) { 321060997c3dSChristian Brauner *new_fdp = dup_fd(fd, max_fds, &error); 3211a016f338SJANAK DESAI if (!*new_fdp) 3212a016f338SJANAK DESAI return error; 3213a016f338SJANAK DESAI } 3214cf2e340fSJANAK DESAI 3215cf2e340fSJANAK DESAI return 0; 3216cf2e340fSJANAK DESAI } 3217cf2e340fSJANAK DESAI 3218cf2e340fSJANAK DESAI /* 3219cf2e340fSJANAK DESAI * unshare allows a process to 'unshare' part of the process 3220cf2e340fSJANAK DESAI * context which was originally shared using clone. copy_* 3221cad6967aSChristian Brauner * functions used by kernel_clone() cannot be used here directly 3222cf2e340fSJANAK DESAI * because they modify an inactive task_struct that is being 3223cf2e340fSJANAK DESAI * constructed. Here we are modifying the current, active, 3224cf2e340fSJANAK DESAI * task_struct. 3225cf2e340fSJANAK DESAI */ 32269b32105eSDominik Brodowski int ksys_unshare(unsigned long unshare_flags) 3227cf2e340fSJANAK DESAI { 3228cf2e340fSJANAK DESAI struct fs_struct *fs, *new_fs = NULL; 3229ba1f70ddSRan Xiaokai struct files_struct *new_fd = NULL; 3230b2e0d987SEric W. Biederman struct cred *new_cred = NULL; 3231cf7b708cSPavel Emelyanov struct nsproxy *new_nsproxy = NULL; 32329edff4abSManfred Spraul int do_sysvsem = 0; 32339bfb23fcSOleg Nesterov int err; 3234cf2e340fSJANAK DESAI 323550804fe3SEric W. Biederman /* 3236faf00da5SEric W. Biederman * If unsharing a user namespace must also unshare the thread group 3237faf00da5SEric W. Biederman * and unshare the filesystem root and working directories. 3238b2e0d987SEric W. Biederman */ 3239b2e0d987SEric W. Biederman if (unshare_flags & CLONE_NEWUSER) 3240e66eded8SEric W. Biederman unshare_flags |= CLONE_THREAD | CLONE_FS; 3241b2e0d987SEric W. Biederman /* 324250804fe3SEric W. 
Biederman * If unsharing vm, must also unshare signal handlers. 324350804fe3SEric W. Biederman */ 324450804fe3SEric W. Biederman if (unshare_flags & CLONE_VM) 324550804fe3SEric W. Biederman unshare_flags |= CLONE_SIGHAND; 32466013f67fSManfred Spraul /* 324712c641abSEric W. Biederman * If unsharing a signal handlers, must also unshare the signal queues. 324812c641abSEric W. Biederman */ 324912c641abSEric W. Biederman if (unshare_flags & CLONE_SIGHAND) 325012c641abSEric W. Biederman unshare_flags |= CLONE_THREAD; 325112c641abSEric W. Biederman /* 32529bfb23fcSOleg Nesterov * If unsharing namespace, must also unshare filesystem information. 32539bfb23fcSOleg Nesterov */ 32549bfb23fcSOleg Nesterov if (unshare_flags & CLONE_NEWNS) 32559bfb23fcSOleg Nesterov unshare_flags |= CLONE_FS; 325650804fe3SEric W. Biederman 325750804fe3SEric W. Biederman err = check_unshare_flags(unshare_flags); 325850804fe3SEric W. Biederman if (err) 325950804fe3SEric W. Biederman goto bad_unshare_out; 32609bfb23fcSOleg Nesterov /* 32616013f67fSManfred Spraul * CLONE_NEWIPC must also detach from the undolist: after switching 32626013f67fSManfred Spraul * to a new ipc namespace, the semaphore arrays from the old 32636013f67fSManfred Spraul * namespace are unreachable. 32646013f67fSManfred Spraul */ 32656013f67fSManfred Spraul if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) 32669edff4abSManfred Spraul do_sysvsem = 1; 3267fb0a685cSDaniel Rebelo de Oliveira err = unshare_fs(unshare_flags, &new_fs); 3268fb0a685cSDaniel Rebelo de Oliveira if (err) 32699bfb23fcSOleg Nesterov goto bad_unshare_out; 327060997c3dSChristian Brauner err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd); 3271fb0a685cSDaniel Rebelo de Oliveira if (err) 32729bfb23fcSOleg Nesterov goto bad_unshare_cleanup_fs; 3273b2e0d987SEric W. Biederman err = unshare_userns(unshare_flags, &new_cred); 3274fb0a685cSDaniel Rebelo de Oliveira if (err) 32759edff4abSManfred Spraul goto bad_unshare_cleanup_fd; 3276b2e0d987SEric W. Biederman err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, 3277b2e0d987SEric W. Biederman new_cred, new_fs); 3278b2e0d987SEric W. Biederman if (err) 3279b2e0d987SEric W. Biederman goto bad_unshare_cleanup_cred; 3280cf2e340fSJANAK DESAI 3281905ae01cSAlexey Gladkov if (new_cred) { 3282905ae01cSAlexey Gladkov err = set_cred_ucounts(new_cred); 3283905ae01cSAlexey Gladkov if (err) 3284905ae01cSAlexey Gladkov goto bad_unshare_cleanup_cred; 3285905ae01cSAlexey Gladkov } 3286905ae01cSAlexey Gladkov 3287b2e0d987SEric W. Biederman if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { 32889edff4abSManfred Spraul if (do_sysvsem) { 32899edff4abSManfred Spraul /* 32909edff4abSManfred Spraul * CLONE_SYSVSEM is equivalent to sys_exit(). 32919edff4abSManfred Spraul */ 32929edff4abSManfred Spraul exit_sem(current); 32939edff4abSManfred Spraul } 3294ab602f79SJack Miller if (unshare_flags & CLONE_NEWIPC) { 3295ab602f79SJack Miller /* Orphan segments in old ns (see sem above). */ 3296ab602f79SJack Miller exit_shm(current); 3297ab602f79SJack Miller shm_init_task(current); 3298ab602f79SJack Miller } 3299ab516013SSerge E. 
	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd)
			swap(current->files, new_fd);

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	return ksys_unshare(unshare_flags);
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(void)
{
	struct task_struct *task = current;
	struct files_struct *old, *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
	if (error || !copy)
		return error;

	old = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	put_files_struct(old);
	return 0;
}
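/*
 * Illustrative userspace sketch (not part of this file, compile with
 * -pthread): the implication chain at the top of ksys_unshare() turns
 * unshare(CLONE_NEWUSER) into a request that also carries CLONE_THREAD,
 * which check_unshare_flags() only permits for a single-threaded caller.
 * The same call that succeeds before spawning a thread therefore fails
 * with EINVAL afterwards:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <pthread.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <errno.h>
 *	#include <unistd.h>
 *
 *	static void *idle(void *arg) { pause(); return NULL; }
 *
 *	int main(void)
 *	{
 *		pthread_t t;
 *
 *		pthread_create(&t, NULL, idle, NULL);
 *		if (unshare(CLONE_NEWUSER) == -1)
 *			printf("unshare: %s\n", strerror(errno)); // EINVAL
 *		return 0;
 *	}
 */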
int sysctl_max_threads(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = 1;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_threads = threads;

	return 0;
}
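/*
 * Illustrative sketch (not part of this file): sysctl_max_threads() above
 * is the proc handler behind /proc/sys/kernel/threads-max, clamping writes
 * to the range [1, MAX_THREADS]. From userspace the current limit can be
 * read (and, with privilege, written) through procfs:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/kernel/threads-max", "r");
 *		int threads_max;
 *
 *		if (f && fscanf(f, "%d", &threads_max) == 1)
 *			printf("threads-max = %d\n", threads_max);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */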