11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/kernel/fork.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 1991, 1992 Linus Torvalds 51da177e4SLinus Torvalds */ 61da177e4SLinus Torvalds 71da177e4SLinus Torvalds /* 81da177e4SLinus Torvalds * 'fork.c' contains the help-routines for the 'fork' system call 91da177e4SLinus Torvalds * (see also entry.S and others). 101da177e4SLinus Torvalds * Fork is rather simple, once you get the hang of it, but the memory 111da177e4SLinus Torvalds * management can be a bitch. See 'mm/memory.c': 'copy_page_range()' 121da177e4SLinus Torvalds */ 131da177e4SLinus Torvalds 141da177e4SLinus Torvalds #include <linux/slab.h> 154eb5aaa3SIngo Molnar #include <linux/sched/autogroup.h> 166e84f315SIngo Molnar #include <linux/sched/mm.h> 17f7ccbae4SIngo Molnar #include <linux/sched/coredump.h> 188703e8a4SIngo Molnar #include <linux/sched/user.h> 196a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h> 2003441a34SIngo Molnar #include <linux/sched/stat.h> 2129930025SIngo Molnar #include <linux/sched/task.h> 2268db0cf1SIngo Molnar #include <linux/sched/task_stack.h> 2332ef5517SIngo Molnar #include <linux/sched/cputime.h> 24037741a6SIngo Molnar #include <linux/rtmutex.h> 251da177e4SLinus Torvalds #include <linux/init.h> 261da177e4SLinus Torvalds #include <linux/unistd.h> 271da177e4SLinus Torvalds #include <linux/module.h> 281da177e4SLinus Torvalds #include <linux/vmalloc.h> 291da177e4SLinus Torvalds #include <linux/completion.h> 301da177e4SLinus Torvalds #include <linux/personality.h> 311da177e4SLinus Torvalds #include <linux/mempolicy.h> 321da177e4SLinus Torvalds #include <linux/sem.h> 331da177e4SLinus Torvalds #include <linux/file.h> 349f3acc31SAl Viro #include <linux/fdtable.h> 35da9cbc87SJens Axboe #include <linux/iocontext.h> 361da177e4SLinus Torvalds #include <linux/key.h> 371da177e4SLinus Torvalds #include <linux/binfmts.h> 381da177e4SLinus Torvalds #include <linux/mman.h> 39cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h> 40133ff0eaSJérôme Glisse #include <linux/hmm.h> 411da177e4SLinus Torvalds #include <linux/fs.h> 42615d6e87SDavidlohr Bueso #include <linux/mm.h> 43615d6e87SDavidlohr Bueso #include <linux/vmacache.h> 44ab516013SSerge E. Hallyn #include <linux/nsproxy.h> 45c59ede7bSRandy.Dunlap #include <linux/capability.h> 461da177e4SLinus Torvalds #include <linux/cpu.h> 47b4f48b63SPaul Menage #include <linux/cgroup.h> 481da177e4SLinus Torvalds #include <linux/security.h> 49a1e78772SMel Gorman #include <linux/hugetlb.h> 50e2cfabdfSWill Drewry #include <linux/seccomp.h> 511da177e4SLinus Torvalds #include <linux/swap.h> 521da177e4SLinus Torvalds #include <linux/syscalls.h> 531da177e4SLinus Torvalds #include <linux/jiffies.h> 541da177e4SLinus Torvalds #include <linux/futex.h> 558141c7f3SLinus Torvalds #include <linux/compat.h> 56207205a2SEric Dumazet #include <linux/kthread.h> 577c3ab738SAndrew Morton #include <linux/task_io_accounting_ops.h> 58ab2af1f5SDipankar Sarma #include <linux/rcupdate.h> 591da177e4SLinus Torvalds #include <linux/ptrace.h> 601da177e4SLinus Torvalds #include <linux/mount.h> 611da177e4SLinus Torvalds #include <linux/audit.h> 6278fb7466SPavel Emelianov #include <linux/memcontrol.h> 63f201ae23SFrederic Weisbecker #include <linux/ftrace.h> 645e2bf014SMike Galbraith #include <linux/proc_fs.h> 651da177e4SLinus Torvalds #include <linux/profile.h> 661da177e4SLinus Torvalds #include <linux/rmap.h> 67f8af4da3SHugh Dickins #include <linux/ksm.h> 681da177e4SLinus Torvalds #include <linux/acct.h> 69893e26e6SPavel Emelyanov #include <linux/userfaultfd_k.h> 708f0ab514SJay Lan #include <linux/tsacct_kern.h> 719f46080cSMatt Helsley #include <linux/cn_proc.h> 72ba96a0c8SRafael J. Wysocki #include <linux/freezer.h> 73ca74e92bSShailabh Nagar #include <linux/delayacct.h> 74ad4ecbcbSShailabh Nagar #include <linux/taskstats_kern.h> 750a425405SArjan van de Ven #include <linux/random.h> 76522ed776SMiloslav Trmac #include <linux/tty.h> 77fd0928dfSJens Axboe #include <linux/blkdev.h> 785ad4e53bSAl Viro #include <linux/fs_struct.h> 797c9f8861SEric Sandeen #include <linux/magic.h> 80d70f2a14SAndrew Morton #include <linux/sched/mm.h> 81cdd6c482SIngo Molnar #include <linux/perf_event.h> 8242c4ab41SStanislaw Gruszka #include <linux/posix-timers.h> 838e7cac79SAvi Kivity #include <linux/user-return-notifier.h> 843d5992d2SYing Han #include <linux/oom.h> 85ba76149fSAndrea Arcangeli #include <linux/khugepaged.h> 86d80e731eSOleg Nesterov #include <linux/signalfd.h> 870326f5a9SSrikar Dronamraju #include <linux/uprobes.h> 88a27bb332SKent Overstreet #include <linux/aio.h> 8952f5684cSGideon Israel Dsouza #include <linux/compiler.h> 9016db3d3fSHeinrich Schuchardt #include <linux/sysctl.h> 915c9a8750SDmitry Vyukov #include <linux/kcov.h> 92d83a7cb3SJosh Poimboeuf #include <linux/livepatch.h> 9348ac3c18SMark Rutland #include <linux/thread_info.h> 941da177e4SLinus Torvalds 951da177e4SLinus Torvalds #include <asm/pgtable.h> 961da177e4SLinus Torvalds #include <asm/pgalloc.h> 977c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 981da177e4SLinus Torvalds #include <asm/mmu_context.h> 991da177e4SLinus Torvalds #include <asm/cacheflush.h> 1001da177e4SLinus Torvalds #include <asm/tlbflush.h> 1011da177e4SLinus Torvalds 102ad8d75ffSSteven Rostedt #include <trace/events/sched.h> 103ad8d75ffSSteven Rostedt 10443d2b113SKAMEZAWA Hiroyuki #define CREATE_TRACE_POINTS 10543d2b113SKAMEZAWA Hiroyuki #include <trace/events/task.h> 10643d2b113SKAMEZAWA Hiroyuki 1071da177e4SLinus Torvalds /* 108ac1b398dSHeinrich Schuchardt * Minimum number of threads to boot the kernel 109ac1b398dSHeinrich Schuchardt */ 110ac1b398dSHeinrich Schuchardt #define MIN_THREADS 20 111ac1b398dSHeinrich Schuchardt 112ac1b398dSHeinrich Schuchardt /* 113ac1b398dSHeinrich Schuchardt * Maximum number of threads 114ac1b398dSHeinrich Schuchardt */ 115ac1b398dSHeinrich Schuchardt #define MAX_THREADS FUTEX_TID_MASK 116ac1b398dSHeinrich Schuchardt 117ac1b398dSHeinrich Schuchardt /* 1181da177e4SLinus Torvalds * Protected counters by write_lock_irq(&tasklist_lock) 1191da177e4SLinus Torvalds */ 1201da177e4SLinus Torvalds unsigned long total_forks; /* Handle normal Linux uptimes. */ 1211da177e4SLinus Torvalds int nr_threads; /* The idle threads do not count.. */ 1221da177e4SLinus Torvalds 1231da177e4SLinus Torvalds int max_threads; /* tunable limit on nr_threads */ 1241da177e4SLinus Torvalds 1251da177e4SLinus Torvalds DEFINE_PER_CPU(unsigned long, process_counts) = 0; 1261da177e4SLinus Torvalds 1271da177e4SLinus Torvalds __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ 128db1466b3SPaul E. McKenney 129db1466b3SPaul E. McKenney #ifdef CONFIG_PROVE_RCU 130db1466b3SPaul E. McKenney int lockdep_tasklist_lock_is_held(void) 131db1466b3SPaul E. McKenney { 132db1466b3SPaul E. McKenney return lockdep_is_held(&tasklist_lock); 133db1466b3SPaul E. McKenney } 134db1466b3SPaul E. McKenney EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held); 135db1466b3SPaul E. McKenney #endif /* #ifdef CONFIG_PROVE_RCU */ 1361da177e4SLinus Torvalds 1371da177e4SLinus Torvalds int nr_processes(void) 1381da177e4SLinus Torvalds { 1391da177e4SLinus Torvalds int cpu; 1401da177e4SLinus Torvalds int total = 0; 1411da177e4SLinus Torvalds 1421d510750SIan Campbell for_each_possible_cpu(cpu) 1431da177e4SLinus Torvalds total += per_cpu(process_counts, cpu); 1441da177e4SLinus Torvalds 1451da177e4SLinus Torvalds return total; 1461da177e4SLinus Torvalds } 1471da177e4SLinus Torvalds 148f19b9f74SAkinobu Mita void __weak arch_release_task_struct(struct task_struct *tsk) 149f19b9f74SAkinobu Mita { 150f19b9f74SAkinobu Mita } 151f19b9f74SAkinobu Mita 152f5e10287SThomas Gleixner #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 153e18b890bSChristoph Lameter static struct kmem_cache *task_struct_cachep; 15441101809SThomas Gleixner 15541101809SThomas Gleixner static inline struct task_struct *alloc_task_struct_node(int node) 15641101809SThomas Gleixner { 15741101809SThomas Gleixner return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); 15841101809SThomas Gleixner } 15941101809SThomas Gleixner 16041101809SThomas Gleixner static inline void free_task_struct(struct task_struct *tsk) 16141101809SThomas Gleixner { 16241101809SThomas Gleixner kmem_cache_free(task_struct_cachep, tsk); 16341101809SThomas Gleixner } 1641da177e4SLinus Torvalds #endif 1651da177e4SLinus Torvalds 166b235beeaSLinus Torvalds void __weak arch_release_thread_stack(unsigned long *stack) 167f19b9f74SAkinobu Mita { 168f19b9f74SAkinobu Mita } 169f19b9f74SAkinobu Mita 170b235beeaSLinus Torvalds #ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR 17141101809SThomas Gleixner 1720d15d74aSThomas Gleixner /* 1730d15d74aSThomas Gleixner * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a 1740d15d74aSThomas Gleixner * kmemcache based allocator. 1750d15d74aSThomas Gleixner */ 176ba14a194SAndy Lutomirski # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) 177ac496bf4SAndy Lutomirski 178ac496bf4SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 179ac496bf4SAndy Lutomirski /* 180ac496bf4SAndy Lutomirski * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB 181ac496bf4SAndy Lutomirski * flush. Try to minimize the number of calls by caching stacks. 182ac496bf4SAndy Lutomirski */ 183ac496bf4SAndy Lutomirski #define NR_CACHED_STACKS 2 184ac496bf4SAndy Lutomirski static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]); 18519659c59SHoeun Ryu 18619659c59SHoeun Ryu static int free_vm_stack_cache(unsigned int cpu) 18719659c59SHoeun Ryu { 18819659c59SHoeun Ryu struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu); 18919659c59SHoeun Ryu int i; 19019659c59SHoeun Ryu 19119659c59SHoeun Ryu for (i = 0; i < NR_CACHED_STACKS; i++) { 19219659c59SHoeun Ryu struct vm_struct *vm_stack = cached_vm_stacks[i]; 19319659c59SHoeun Ryu 19419659c59SHoeun Ryu if (!vm_stack) 19519659c59SHoeun Ryu continue; 19619659c59SHoeun Ryu 19719659c59SHoeun Ryu vfree(vm_stack->addr); 19819659c59SHoeun Ryu cached_vm_stacks[i] = NULL; 19919659c59SHoeun Ryu } 20019659c59SHoeun Ryu 20119659c59SHoeun Ryu return 0; 20219659c59SHoeun Ryu } 203ac496bf4SAndy Lutomirski #endif 204ac496bf4SAndy Lutomirski 205ba14a194SAndy Lutomirski static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) 206b69c49b7SFUJITA Tomonori { 207ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 208ac496bf4SAndy Lutomirski void *stack; 209ac496bf4SAndy Lutomirski int i; 210ac496bf4SAndy Lutomirski 211ac496bf4SAndy Lutomirski for (i = 0; i < NR_CACHED_STACKS; i++) { 212112166f8SChristoph Lameter struct vm_struct *s; 213112166f8SChristoph Lameter 214112166f8SChristoph Lameter s = this_cpu_xchg(cached_stacks[i], NULL); 215ac496bf4SAndy Lutomirski 216ac496bf4SAndy Lutomirski if (!s) 217ac496bf4SAndy Lutomirski continue; 218ac496bf4SAndy Lutomirski 219ca182551SKonstantin Khlebnikov /* Clear stale pointers from reused stack. */ 220ca182551SKonstantin Khlebnikov memset(s->addr, 0, THREAD_SIZE); 221e01e8063SKees Cook 222ac496bf4SAndy Lutomirski tsk->stack_vm_area = s; 223ac496bf4SAndy Lutomirski return s->addr; 224ac496bf4SAndy Lutomirski } 225ac496bf4SAndy Lutomirski 22648ac3c18SMark Rutland stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, 227ba14a194SAndy Lutomirski VMALLOC_START, VMALLOC_END, 22819809c2dSMichal Hocko THREADINFO_GFP, 229ba14a194SAndy Lutomirski PAGE_KERNEL, 230ac496bf4SAndy Lutomirski 0, node, __builtin_return_address(0)); 231ba14a194SAndy Lutomirski 232ba14a194SAndy Lutomirski /* 233ba14a194SAndy Lutomirski * We can't call find_vm_area() in interrupt context, and 234ba14a194SAndy Lutomirski * free_thread_stack() can be called in interrupt context, 235ba14a194SAndy Lutomirski * so cache the vm_struct. 236ba14a194SAndy Lutomirski */ 237ba14a194SAndy Lutomirski if (stack) 238ba14a194SAndy Lutomirski tsk->stack_vm_area = find_vm_area(stack); 239ba14a194SAndy Lutomirski return stack; 240ba14a194SAndy Lutomirski #else 2414949148aSVladimir Davydov struct page *page = alloc_pages_node(node, THREADINFO_GFP, 2422889f608SThomas Gleixner THREAD_SIZE_ORDER); 243b6a84016SEric Dumazet 244b6a84016SEric Dumazet return page ? page_address(page) : NULL; 245ba14a194SAndy Lutomirski #endif 246b69c49b7SFUJITA Tomonori } 247b69c49b7SFUJITA Tomonori 248ba14a194SAndy Lutomirski static inline void free_thread_stack(struct task_struct *tsk) 249b69c49b7SFUJITA Tomonori { 250ac496bf4SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 251ac496bf4SAndy Lutomirski if (task_stack_vm_area(tsk)) { 252ac496bf4SAndy Lutomirski int i; 253ac496bf4SAndy Lutomirski 254ac496bf4SAndy Lutomirski for (i = 0; i < NR_CACHED_STACKS; i++) { 255112166f8SChristoph Lameter if (this_cpu_cmpxchg(cached_stacks[i], 256112166f8SChristoph Lameter NULL, tsk->stack_vm_area) != NULL) 257ac496bf4SAndy Lutomirski continue; 258ac496bf4SAndy Lutomirski 259ac496bf4SAndy Lutomirski return; 260ac496bf4SAndy Lutomirski } 261ac496bf4SAndy Lutomirski 2620f110a9bSAndrey Ryabinin vfree_atomic(tsk->stack); 263ac496bf4SAndy Lutomirski return; 264ac496bf4SAndy Lutomirski } 265ac496bf4SAndy Lutomirski #endif 266ac496bf4SAndy Lutomirski 267ba14a194SAndy Lutomirski __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER); 268b69c49b7SFUJITA Tomonori } 2690d15d74aSThomas Gleixner # else 270b235beeaSLinus Torvalds static struct kmem_cache *thread_stack_cache; 2710d15d74aSThomas Gleixner 2729521d399SMichael Ellerman static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, 2730d15d74aSThomas Gleixner int node) 2740d15d74aSThomas Gleixner { 275b235beeaSLinus Torvalds return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); 2760d15d74aSThomas Gleixner } 2770d15d74aSThomas Gleixner 278ba14a194SAndy Lutomirski static void free_thread_stack(struct task_struct *tsk) 2790d15d74aSThomas Gleixner { 280ba14a194SAndy Lutomirski kmem_cache_free(thread_stack_cache, tsk->stack); 2810d15d74aSThomas Gleixner } 2820d15d74aSThomas Gleixner 283b235beeaSLinus Torvalds void thread_stack_cache_init(void) 2840d15d74aSThomas Gleixner { 285f9d29946SDavid Windsor thread_stack_cache = kmem_cache_create_usercopy("thread_stack", 286f9d29946SDavid Windsor THREAD_SIZE, THREAD_SIZE, 0, 0, 287f9d29946SDavid Windsor THREAD_SIZE, NULL); 288b235beeaSLinus Torvalds BUG_ON(thread_stack_cache == NULL); 2890d15d74aSThomas Gleixner } 2900d15d74aSThomas Gleixner # endif 291b69c49b7SFUJITA Tomonori #endif 292b69c49b7SFUJITA Tomonori 2931da177e4SLinus Torvalds /* SLAB cache for signal_struct structures (tsk->signal) */ 294e18b890bSChristoph Lameter static struct kmem_cache *signal_cachep; 2951da177e4SLinus Torvalds 2961da177e4SLinus Torvalds /* SLAB cache for sighand_struct structures (tsk->sighand) */ 297e18b890bSChristoph Lameter struct kmem_cache *sighand_cachep; 2981da177e4SLinus Torvalds 2991da177e4SLinus Torvalds /* SLAB cache for files_struct structures (tsk->files) */ 300e18b890bSChristoph Lameter struct kmem_cache *files_cachep; 3011da177e4SLinus Torvalds 3021da177e4SLinus Torvalds /* SLAB cache for fs_struct structures (tsk->fs) */ 303e18b890bSChristoph Lameter struct kmem_cache *fs_cachep; 3041da177e4SLinus Torvalds 3051da177e4SLinus Torvalds /* SLAB cache for vm_area_struct structures */ 3063928d4f5SLinus Torvalds static struct kmem_cache *vm_area_cachep; 3071da177e4SLinus Torvalds 3081da177e4SLinus Torvalds /* SLAB cache for mm_struct structures (tsk->mm) */ 309e18b890bSChristoph Lameter static struct kmem_cache *mm_cachep; 3101da177e4SLinus Torvalds 311490fc053SLinus Torvalds struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) 3123928d4f5SLinus Torvalds { 313490fc053SLinus Torvalds struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 314490fc053SLinus Torvalds 315*027232daSKirill A. Shutemov if (vma) 316*027232daSKirill A. Shutemov vma_init(vma, mm); 317490fc053SLinus Torvalds return vma; 3183928d4f5SLinus Torvalds } 3193928d4f5SLinus Torvalds 3203928d4f5SLinus Torvalds struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) 3213928d4f5SLinus Torvalds { 32295faf699SLinus Torvalds struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 32395faf699SLinus Torvalds 32495faf699SLinus Torvalds if (new) { 32595faf699SLinus Torvalds *new = *orig; 32695faf699SLinus Torvalds INIT_LIST_HEAD(&new->anon_vma_chain); 32795faf699SLinus Torvalds } 32895faf699SLinus Torvalds return new; 3293928d4f5SLinus Torvalds } 3303928d4f5SLinus Torvalds 3313928d4f5SLinus Torvalds void vm_area_free(struct vm_area_struct *vma) 3323928d4f5SLinus Torvalds { 3333928d4f5SLinus Torvalds kmem_cache_free(vm_area_cachep, vma); 3343928d4f5SLinus Torvalds } 3353928d4f5SLinus Torvalds 336ba14a194SAndy Lutomirski static void account_kernel_stack(struct task_struct *tsk, int account) 337c6a7f572SKOSAKI Motohiro { 338ba14a194SAndy Lutomirski void *stack = task_stack_page(tsk); 339ba14a194SAndy Lutomirski struct vm_struct *vm = task_stack_vm_area(tsk); 340ba14a194SAndy Lutomirski 341ba14a194SAndy Lutomirski BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0); 342ba14a194SAndy Lutomirski 343ba14a194SAndy Lutomirski if (vm) { 344ba14a194SAndy Lutomirski int i; 345ba14a194SAndy Lutomirski 346ba14a194SAndy Lutomirski BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE); 347ba14a194SAndy Lutomirski 348ba14a194SAndy Lutomirski for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { 349ba14a194SAndy Lutomirski mod_zone_page_state(page_zone(vm->pages[i]), 350ba14a194SAndy Lutomirski NR_KERNEL_STACK_KB, 351ba14a194SAndy Lutomirski PAGE_SIZE / 1024 * account); 352ba14a194SAndy Lutomirski } 353ba14a194SAndy Lutomirski 354ba14a194SAndy Lutomirski /* All stack pages belong to the same memcg. */ 355ed52be7bSJohannes Weiner mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB, 356ba14a194SAndy Lutomirski account * (THREAD_SIZE / 1024)); 357ba14a194SAndy Lutomirski } else { 358ba14a194SAndy Lutomirski /* 359ba14a194SAndy Lutomirski * All stack pages are in the same zone and belong to the 360ba14a194SAndy Lutomirski * same memcg. 361ba14a194SAndy Lutomirski */ 362efdc9490SAndy Lutomirski struct page *first_page = virt_to_page(stack); 363c6a7f572SKOSAKI Motohiro 364efdc9490SAndy Lutomirski mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB, 365d30dd8beSAndy Lutomirski THREAD_SIZE / 1024 * account); 366efdc9490SAndy Lutomirski 367ed52be7bSJohannes Weiner mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB, 368efdc9490SAndy Lutomirski account * (THREAD_SIZE / 1024)); 369c6a7f572SKOSAKI Motohiro } 370ba14a194SAndy Lutomirski } 371c6a7f572SKOSAKI Motohiro 37268f24b08SAndy Lutomirski static void release_task_stack(struct task_struct *tsk) 3731da177e4SLinus Torvalds { 374405c0759SAndy Lutomirski if (WARN_ON(tsk->state != TASK_DEAD)) 375405c0759SAndy Lutomirski return; /* Better to leak the stack than to free prematurely */ 376405c0759SAndy Lutomirski 377ba14a194SAndy Lutomirski account_kernel_stack(tsk, -1); 378b235beeaSLinus Torvalds arch_release_thread_stack(tsk->stack); 379ba14a194SAndy Lutomirski free_thread_stack(tsk); 38068f24b08SAndy Lutomirski tsk->stack = NULL; 38168f24b08SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 38268f24b08SAndy Lutomirski tsk->stack_vm_area = NULL; 38368f24b08SAndy Lutomirski #endif 38468f24b08SAndy Lutomirski } 38568f24b08SAndy Lutomirski 38668f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 38768f24b08SAndy Lutomirski void put_task_stack(struct task_struct *tsk) 38868f24b08SAndy Lutomirski { 38968f24b08SAndy Lutomirski if (atomic_dec_and_test(&tsk->stack_refcount)) 39068f24b08SAndy Lutomirski release_task_stack(tsk); 39168f24b08SAndy Lutomirski } 39268f24b08SAndy Lutomirski #endif 39368f24b08SAndy Lutomirski 39468f24b08SAndy Lutomirski void free_task(struct task_struct *tsk) 39568f24b08SAndy Lutomirski { 39668f24b08SAndy Lutomirski #ifndef CONFIG_THREAD_INFO_IN_TASK 39768f24b08SAndy Lutomirski /* 39868f24b08SAndy Lutomirski * The task is finally done with both the stack and thread_info, 39968f24b08SAndy Lutomirski * so free both. 40068f24b08SAndy Lutomirski */ 40168f24b08SAndy Lutomirski release_task_stack(tsk); 40268f24b08SAndy Lutomirski #else 40368f24b08SAndy Lutomirski /* 40468f24b08SAndy Lutomirski * If the task had a separate stack allocation, it should be gone 40568f24b08SAndy Lutomirski * by now. 40668f24b08SAndy Lutomirski */ 40768f24b08SAndy Lutomirski WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0); 40868f24b08SAndy Lutomirski #endif 40923f78d4aSIngo Molnar rt_mutex_debug_task_free(tsk); 410fb52607aSFrederic Weisbecker ftrace_graph_exit_task(tsk); 411e2cfabdfSWill Drewry put_seccomp_filter(tsk); 412f19b9f74SAkinobu Mita arch_release_task_struct(tsk); 4131da5c46fSOleg Nesterov if (tsk->flags & PF_KTHREAD) 4141da5c46fSOleg Nesterov free_kthread_struct(tsk); 4151da177e4SLinus Torvalds free_task_struct(tsk); 4161da177e4SLinus Torvalds } 4171da177e4SLinus Torvalds EXPORT_SYMBOL(free_task); 4181da177e4SLinus Torvalds 4191da177e4SLinus Torvalds #ifdef CONFIG_MMU 4200766f788SEmese Revfy static __latent_entropy int dup_mmap(struct mm_struct *mm, 4210766f788SEmese Revfy struct mm_struct *oldmm) 4221da177e4SLinus Torvalds { 423297c5eeeSLinus Torvalds struct vm_area_struct *mpnt, *tmp, *prev, **pprev; 4241da177e4SLinus Torvalds struct rb_node **rb_link, *rb_parent; 4251da177e4SLinus Torvalds int retval; 4261da177e4SLinus Torvalds unsigned long charge; 427893e26e6SPavel Emelyanov LIST_HEAD(uf); 4281da177e4SLinus Torvalds 42932cdba1eSOleg Nesterov uprobe_start_dup_mmap(); 4307c051267SMichal Hocko if (down_write_killable(&oldmm->mmap_sem)) { 4317c051267SMichal Hocko retval = -EINTR; 4327c051267SMichal Hocko goto fail_uprobe_end; 4337c051267SMichal Hocko } 434ec8c0446SRalf Baechle flush_cache_dup_mm(oldmm); 435f8ac4ec9SOleg Nesterov uprobe_dup_mmap(oldmm, mm); 436ad339451SIngo Molnar /* 437ad339451SIngo Molnar * Not linked in yet - no deadlock potential: 438ad339451SIngo Molnar */ 439ad339451SIngo Molnar down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); 4407ee78232SHugh Dickins 44190f31d0eSKonstantin Khlebnikov /* No ordering required: file already has been exposed. */ 44290f31d0eSKonstantin Khlebnikov RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); 44390f31d0eSKonstantin Khlebnikov 4444f7d4614SVladimir Davydov mm->total_vm = oldmm->total_vm; 44584638335SKonstantin Khlebnikov mm->data_vm = oldmm->data_vm; 4464f7d4614SVladimir Davydov mm->exec_vm = oldmm->exec_vm; 4474f7d4614SVladimir Davydov mm->stack_vm = oldmm->stack_vm; 4484f7d4614SVladimir Davydov 4491da177e4SLinus Torvalds rb_link = &mm->mm_rb.rb_node; 4501da177e4SLinus Torvalds rb_parent = NULL; 4511da177e4SLinus Torvalds pprev = &mm->mmap; 452f8af4da3SHugh Dickins retval = ksm_fork(mm, oldmm); 453f8af4da3SHugh Dickins if (retval) 454f8af4da3SHugh Dickins goto out; 455ba76149fSAndrea Arcangeli retval = khugepaged_fork(mm, oldmm); 456ba76149fSAndrea Arcangeli if (retval) 457ba76149fSAndrea Arcangeli goto out; 4581da177e4SLinus Torvalds 459297c5eeeSLinus Torvalds prev = NULL; 460fd3e42fcSHugh Dickins for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { 4611da177e4SLinus Torvalds struct file *file; 4621da177e4SLinus Torvalds 4631da177e4SLinus Torvalds if (mpnt->vm_flags & VM_DONTCOPY) { 46484638335SKonstantin Khlebnikov vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); 4651da177e4SLinus Torvalds continue; 4661da177e4SLinus Torvalds } 4671da177e4SLinus Torvalds charge = 0; 468655c79bbSTetsuo Handa /* 469655c79bbSTetsuo Handa * Don't duplicate many vmas if we've been oom-killed (for 470655c79bbSTetsuo Handa * example) 471655c79bbSTetsuo Handa */ 472655c79bbSTetsuo Handa if (fatal_signal_pending(current)) { 473655c79bbSTetsuo Handa retval = -EINTR; 474655c79bbSTetsuo Handa goto out; 475655c79bbSTetsuo Handa } 4761da177e4SLinus Torvalds if (mpnt->vm_flags & VM_ACCOUNT) { 477b2412b7fSHuang Shijie unsigned long len = vma_pages(mpnt); 478b2412b7fSHuang Shijie 479191c5424SAl Viro if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ 4801da177e4SLinus Torvalds goto fail_nomem; 4811da177e4SLinus Torvalds charge = len; 4821da177e4SLinus Torvalds } 4833928d4f5SLinus Torvalds tmp = vm_area_dup(mpnt); 4841da177e4SLinus Torvalds if (!tmp) 4851da177e4SLinus Torvalds goto fail_nomem; 486ef0855d3SOleg Nesterov retval = vma_dup_policy(mpnt, tmp); 487ef0855d3SOleg Nesterov if (retval) 4881da177e4SLinus Torvalds goto fail_nomem_policy; 489a247c3a9SAndrea Arcangeli tmp->vm_mm = mm; 490893e26e6SPavel Emelyanov retval = dup_userfaultfd(tmp, &uf); 491893e26e6SPavel Emelyanov if (retval) 492893e26e6SPavel Emelyanov goto fail_nomem_anon_vma_fork; 493d2cd9edeSRik van Riel if (tmp->vm_flags & VM_WIPEONFORK) { 494d2cd9edeSRik van Riel /* VM_WIPEONFORK gets a clean slate in the child. */ 495d2cd9edeSRik van Riel tmp->anon_vma = NULL; 496d2cd9edeSRik van Riel if (anon_vma_prepare(tmp)) 497d2cd9edeSRik van Riel goto fail_nomem_anon_vma_fork; 498d2cd9edeSRik van Riel } else if (anon_vma_fork(tmp, mpnt)) 4995beb4930SRik van Riel goto fail_nomem_anon_vma_fork; 500893e26e6SPavel Emelyanov tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); 501297c5eeeSLinus Torvalds tmp->vm_next = tmp->vm_prev = NULL; 5021da177e4SLinus Torvalds file = tmp->vm_file; 5031da177e4SLinus Torvalds if (file) { 504496ad9aaSAl Viro struct inode *inode = file_inode(file); 505b88ed205SHugh Dickins struct address_space *mapping = file->f_mapping; 506b88ed205SHugh Dickins 5071da177e4SLinus Torvalds get_file(file); 5081da177e4SLinus Torvalds if (tmp->vm_flags & VM_DENYWRITE) 5091da177e4SLinus Torvalds atomic_dec(&inode->i_writecount); 51083cde9e8SDavidlohr Bueso i_mmap_lock_write(mapping); 511b88ed205SHugh Dickins if (tmp->vm_flags & VM_SHARED) 5124bb5f5d9SDavid Herrmann atomic_inc(&mapping->i_mmap_writable); 513b88ed205SHugh Dickins flush_dcache_mmap_lock(mapping); 514b88ed205SHugh Dickins /* insert tmp into the share list, just after mpnt */ 5159826a516SMichel Lespinasse vma_interval_tree_insert_after(tmp, mpnt, 5169826a516SMichel Lespinasse &mapping->i_mmap); 517b88ed205SHugh Dickins flush_dcache_mmap_unlock(mapping); 51883cde9e8SDavidlohr Bueso i_mmap_unlock_write(mapping); 5191da177e4SLinus Torvalds } 5201da177e4SLinus Torvalds 5211da177e4SLinus Torvalds /* 522a1e78772SMel Gorman * Clear hugetlb-related page reserves for children. This only 523a1e78772SMel Gorman * affects MAP_PRIVATE mappings. Faults generated by the child 524a1e78772SMel Gorman * are not guaranteed to succeed, even if read-only 525a1e78772SMel Gorman */ 526a1e78772SMel Gorman if (is_vm_hugetlb_page(tmp)) 527a1e78772SMel Gorman reset_vma_resv_huge_pages(tmp); 528a1e78772SMel Gorman 529a1e78772SMel Gorman /* 5307ee78232SHugh Dickins * Link in the new vma and copy the page table entries. 5311da177e4SLinus Torvalds */ 5321da177e4SLinus Torvalds *pprev = tmp; 5331da177e4SLinus Torvalds pprev = &tmp->vm_next; 534297c5eeeSLinus Torvalds tmp->vm_prev = prev; 535297c5eeeSLinus Torvalds prev = tmp; 5361da177e4SLinus Torvalds 5371da177e4SLinus Torvalds __vma_link_rb(mm, tmp, rb_link, rb_parent); 5381da177e4SLinus Torvalds rb_link = &tmp->vm_rb.rb_right; 5391da177e4SLinus Torvalds rb_parent = &tmp->vm_rb; 5401da177e4SLinus Torvalds 5411da177e4SLinus Torvalds mm->map_count++; 542d2cd9edeSRik van Riel if (!(tmp->vm_flags & VM_WIPEONFORK)) 5430b0db14cSHugh Dickins retval = copy_page_range(mm, oldmm, mpnt); 5441da177e4SLinus Torvalds 5451da177e4SLinus Torvalds if (tmp->vm_ops && tmp->vm_ops->open) 5461da177e4SLinus Torvalds tmp->vm_ops->open(tmp); 5471da177e4SLinus Torvalds 5481da177e4SLinus Torvalds if (retval) 5491da177e4SLinus Torvalds goto out; 5501da177e4SLinus Torvalds } 551d6dd61c8SJeremy Fitzhardinge /* a new mm has just been created */ 552d70f2a14SAndrew Morton arch_dup_mmap(oldmm, mm); 553d70f2a14SAndrew Morton retval = 0; 5541da177e4SLinus Torvalds out: 5557ee78232SHugh Dickins up_write(&mm->mmap_sem); 556fd3e42fcSHugh Dickins flush_tlb_mm(oldmm); 5571da177e4SLinus Torvalds up_write(&oldmm->mmap_sem); 558893e26e6SPavel Emelyanov dup_userfaultfd_complete(&uf); 5597c051267SMichal Hocko fail_uprobe_end: 56032cdba1eSOleg Nesterov uprobe_end_dup_mmap(); 5611da177e4SLinus Torvalds return retval; 5625beb4930SRik van Riel fail_nomem_anon_vma_fork: 563ef0855d3SOleg Nesterov mpol_put(vma_policy(tmp)); 5641da177e4SLinus Torvalds fail_nomem_policy: 5653928d4f5SLinus Torvalds vm_area_free(tmp); 5661da177e4SLinus Torvalds fail_nomem: 5671da177e4SLinus Torvalds retval = -ENOMEM; 5681da177e4SLinus Torvalds vm_unacct_memory(charge); 5691da177e4SLinus Torvalds goto out; 5701da177e4SLinus Torvalds } 5711da177e4SLinus Torvalds 5721da177e4SLinus Torvalds static inline int mm_alloc_pgd(struct mm_struct *mm) 5731da177e4SLinus Torvalds { 5741da177e4SLinus Torvalds mm->pgd = pgd_alloc(mm); 5751da177e4SLinus Torvalds if (unlikely(!mm->pgd)) 5761da177e4SLinus Torvalds return -ENOMEM; 5771da177e4SLinus Torvalds return 0; 5781da177e4SLinus Torvalds } 5791da177e4SLinus Torvalds 5801da177e4SLinus Torvalds static inline void mm_free_pgd(struct mm_struct *mm) 5811da177e4SLinus Torvalds { 5825e541973SBenjamin Herrenschmidt pgd_free(mm, mm->pgd); 5831da177e4SLinus Torvalds } 5841da177e4SLinus Torvalds #else 58590f31d0eSKonstantin Khlebnikov static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) 58690f31d0eSKonstantin Khlebnikov { 58790f31d0eSKonstantin Khlebnikov down_write(&oldmm->mmap_sem); 58890f31d0eSKonstantin Khlebnikov RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); 58990f31d0eSKonstantin Khlebnikov up_write(&oldmm->mmap_sem); 59090f31d0eSKonstantin Khlebnikov return 0; 59190f31d0eSKonstantin Khlebnikov } 5921da177e4SLinus Torvalds #define mm_alloc_pgd(mm) (0) 5931da177e4SLinus Torvalds #define mm_free_pgd(mm) 5941da177e4SLinus Torvalds #endif /* CONFIG_MMU */ 5951da177e4SLinus Torvalds 596d70f2a14SAndrew Morton static void check_mm(struct mm_struct *mm) 597d70f2a14SAndrew Morton { 598d70f2a14SAndrew Morton int i; 599d70f2a14SAndrew Morton 600d70f2a14SAndrew Morton for (i = 0; i < NR_MM_COUNTERS; i++) { 601d70f2a14SAndrew Morton long x = atomic_long_read(&mm->rss_stat.count[i]); 602d70f2a14SAndrew Morton 603d70f2a14SAndrew Morton if (unlikely(x)) 604d70f2a14SAndrew Morton printk(KERN_ALERT "BUG: Bad rss-counter state " 605d70f2a14SAndrew Morton "mm:%p idx:%d val:%ld\n", mm, i, x); 606d70f2a14SAndrew Morton } 607d70f2a14SAndrew Morton 608d70f2a14SAndrew Morton if (mm_pgtables_bytes(mm)) 609d70f2a14SAndrew Morton pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n", 610d70f2a14SAndrew Morton mm_pgtables_bytes(mm)); 611d70f2a14SAndrew Morton 612d70f2a14SAndrew Morton #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 613d70f2a14SAndrew Morton VM_BUG_ON_MM(mm->pmd_huge_pte, mm); 614d70f2a14SAndrew Morton #endif 615d70f2a14SAndrew Morton } 6161da177e4SLinus Torvalds 617e94b1766SChristoph Lameter #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) 6181da177e4SLinus Torvalds #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) 6191da177e4SLinus Torvalds 620d70f2a14SAndrew Morton /* 621d70f2a14SAndrew Morton * Called when the last reference to the mm 622d70f2a14SAndrew Morton * is dropped: either by a lazy thread or by 623d70f2a14SAndrew Morton * mmput. Free the page directory and the mm. 624d70f2a14SAndrew Morton */ 625d34bc48fSAndrew Morton void __mmdrop(struct mm_struct *mm) 626d70f2a14SAndrew Morton { 627d70f2a14SAndrew Morton BUG_ON(mm == &init_mm); 6283eda69c9SMark Rutland WARN_ON_ONCE(mm == current->mm); 6293eda69c9SMark Rutland WARN_ON_ONCE(mm == current->active_mm); 630d70f2a14SAndrew Morton mm_free_pgd(mm); 631d70f2a14SAndrew Morton destroy_context(mm); 632d70f2a14SAndrew Morton hmm_mm_destroy(mm); 633d70f2a14SAndrew Morton mmu_notifier_mm_destroy(mm); 634d70f2a14SAndrew Morton check_mm(mm); 635d70f2a14SAndrew Morton put_user_ns(mm->user_ns); 636d70f2a14SAndrew Morton free_mm(mm); 637d70f2a14SAndrew Morton } 638d34bc48fSAndrew Morton EXPORT_SYMBOL_GPL(__mmdrop); 639d70f2a14SAndrew Morton 640d70f2a14SAndrew Morton static void mmdrop_async_fn(struct work_struct *work) 641d70f2a14SAndrew Morton { 642d70f2a14SAndrew Morton struct mm_struct *mm; 643d70f2a14SAndrew Morton 644d70f2a14SAndrew Morton mm = container_of(work, struct mm_struct, async_put_work); 645d70f2a14SAndrew Morton __mmdrop(mm); 646d70f2a14SAndrew Morton } 647d70f2a14SAndrew Morton 648d70f2a14SAndrew Morton static void mmdrop_async(struct mm_struct *mm) 649d70f2a14SAndrew Morton { 650d70f2a14SAndrew Morton if (unlikely(atomic_dec_and_test(&mm->mm_count))) { 651d70f2a14SAndrew Morton INIT_WORK(&mm->async_put_work, mmdrop_async_fn); 652d70f2a14SAndrew Morton schedule_work(&mm->async_put_work); 653d70f2a14SAndrew Morton } 654d70f2a14SAndrew Morton } 655d70f2a14SAndrew Morton 6561da177e4SLinus Torvalds static inline void free_signal_struct(struct signal_struct *sig) 6571da177e4SLinus Torvalds { 6581da177e4SLinus Torvalds taskstats_tgid_free(sig); 6591da177e4SLinus Torvalds sched_autogroup_exit(sig); 6601da177e4SLinus Torvalds /* 6611da177e4SLinus Torvalds * __mmdrop is not safe to call from softirq context on x86 due to 6621da177e4SLinus Torvalds * pgd_dtor so postpone it to the async context 6631da177e4SLinus Torvalds */ 6641da177e4SLinus Torvalds if (sig->oom_mm) 6651da177e4SLinus Torvalds mmdrop_async(sig->oom_mm); 6661da177e4SLinus Torvalds kmem_cache_free(signal_cachep, sig); 6671da177e4SLinus Torvalds } 6681da177e4SLinus Torvalds 6691da177e4SLinus Torvalds static inline void put_signal_struct(struct signal_struct *sig) 6701da177e4SLinus Torvalds { 6711da177e4SLinus Torvalds if (atomic_dec_and_test(&sig->sigcnt)) 6721da177e4SLinus Torvalds free_signal_struct(sig); 6731da177e4SLinus Torvalds } 6741da177e4SLinus Torvalds 6751da177e4SLinus Torvalds void __put_task_struct(struct task_struct *tsk) 6761da177e4SLinus Torvalds { 6771da177e4SLinus Torvalds WARN_ON(!tsk->exit_state); 6781da177e4SLinus Torvalds WARN_ON(atomic_read(&tsk->usage)); 6791da177e4SLinus Torvalds WARN_ON(tsk == current); 6801da177e4SLinus Torvalds 6811da177e4SLinus Torvalds cgroup_free(tsk); 6821da177e4SLinus Torvalds task_numa_free(tsk); 6831da177e4SLinus Torvalds security_task_free(tsk); 6841da177e4SLinus Torvalds exit_creds(tsk); 6851da177e4SLinus Torvalds delayacct_tsk_free(tsk); 6861da177e4SLinus Torvalds put_signal_struct(tsk->signal); 6871da177e4SLinus Torvalds 6881da177e4SLinus Torvalds if (!profile_handoff_task(tsk)) 6891da177e4SLinus Torvalds free_task(tsk); 6901da177e4SLinus Torvalds } 6911da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__put_task_struct); 6921da177e4SLinus Torvalds 6931da177e4SLinus Torvalds void __init __weak arch_task_cache_init(void) { } 6941da177e4SLinus Torvalds 6951da177e4SLinus Torvalds /* 6961da177e4SLinus Torvalds * set_max_threads 6971da177e4SLinus Torvalds */ 6981da177e4SLinus Torvalds static void set_max_threads(unsigned int max_threads_suggested) 6991da177e4SLinus Torvalds { 7001da177e4SLinus Torvalds u64 threads; 7011da177e4SLinus Torvalds 7021da177e4SLinus Torvalds /* 7031da177e4SLinus Torvalds * The number of threads shall be limited such that the thread 7041da177e4SLinus Torvalds * structures may only consume a small part of the available memory. 7051da177e4SLinus Torvalds */ 7061da177e4SLinus Torvalds if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64) 7071da177e4SLinus Torvalds threads = MAX_THREADS; 7081da177e4SLinus Torvalds else 7091da177e4SLinus Torvalds threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, 7101da177e4SLinus Torvalds (u64) THREAD_SIZE * 8UL); 7111da177e4SLinus Torvalds 7121da177e4SLinus Torvalds if (threads > max_threads_suggested) 7131da177e4SLinus Torvalds threads = max_threads_suggested; 7141da177e4SLinus Torvalds 7151da177e4SLinus Torvalds max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); 7161da177e4SLinus Torvalds } 7171da177e4SLinus Torvalds 7181da177e4SLinus Torvalds #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT 7191da177e4SLinus Torvalds /* Initialized by the architecture: */ 7201da177e4SLinus Torvalds int arch_task_struct_size __read_mostly; 7211da177e4SLinus Torvalds #endif 7221da177e4SLinus Torvalds 7235905429aSKees Cook static void task_struct_whitelist(unsigned long *offset, unsigned long *size) 7245905429aSKees Cook { 7255905429aSKees Cook /* Fetch thread_struct whitelist for the architecture. */ 7265905429aSKees Cook arch_thread_struct_whitelist(offset, size); 7275905429aSKees Cook 7285905429aSKees Cook /* 7295905429aSKees Cook * Handle zero-sized whitelist or empty thread_struct, otherwise 7305905429aSKees Cook * adjust offset to position of thread_struct in task_struct. 7315905429aSKees Cook */ 7325905429aSKees Cook if (unlikely(*size == 0)) 7335905429aSKees Cook *offset = 0; 7345905429aSKees Cook else 7355905429aSKees Cook *offset += offsetof(struct task_struct, thread); 7365905429aSKees Cook } 7375905429aSKees Cook 7381da177e4SLinus Torvalds void __init fork_init(void) 7391da177e4SLinus Torvalds { 7401da177e4SLinus Torvalds int i; 7411da177e4SLinus Torvalds #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 7421da177e4SLinus Torvalds #ifndef ARCH_MIN_TASKALIGN 7431da177e4SLinus Torvalds #define ARCH_MIN_TASKALIGN 0 7441da177e4SLinus Torvalds #endif 7451da177e4SLinus Torvalds int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); 7465905429aSKees Cook unsigned long useroffset, usersize; 7471da177e4SLinus Torvalds 7481da177e4SLinus Torvalds /* create a slab on which task_structs can be allocated */ 7495905429aSKees Cook task_struct_whitelist(&useroffset, &usersize); 7505905429aSKees Cook task_struct_cachep = kmem_cache_create_usercopy("task_struct", 7511da177e4SLinus Torvalds arch_task_struct_size, align, 7525905429aSKees Cook SLAB_PANIC|SLAB_ACCOUNT, 7535905429aSKees Cook useroffset, usersize, NULL); 7541da177e4SLinus Torvalds #endif 7551da177e4SLinus Torvalds 7561da177e4SLinus Torvalds /* do the arch specific task caches init */ 7571da177e4SLinus Torvalds arch_task_cache_init(); 7581da177e4SLinus Torvalds 7591da177e4SLinus Torvalds set_max_threads(MAX_THREADS); 7601da177e4SLinus Torvalds 7611da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; 7621da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; 7631da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_SIGPENDING] = 7641da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_NPROC]; 7651da177e4SLinus Torvalds 7661da177e4SLinus Torvalds for (i = 0; i < UCOUNT_COUNTS; i++) { 7671da177e4SLinus Torvalds init_user_ns.ucount_max[i] = max_threads/2; 7681da177e4SLinus Torvalds } 7691da177e4SLinus Torvalds 7701da177e4SLinus Torvalds #ifdef CONFIG_VMAP_STACK 7711da177e4SLinus Torvalds cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", 7721da177e4SLinus Torvalds NULL, free_vm_stack_cache); 7731da177e4SLinus Torvalds #endif 7741da177e4SLinus Torvalds 7751da177e4SLinus Torvalds lockdep_init_task(&init_task); 7761da177e4SLinus Torvalds } 7771da177e4SLinus Torvalds 7781da177e4SLinus Torvalds int __weak arch_dup_task_struct(struct task_struct *dst, 7791da177e4SLinus Torvalds struct task_struct *src) 7801da177e4SLinus Torvalds { 7811da177e4SLinus Torvalds *dst = *src; 7821da177e4SLinus Torvalds return 0; 7831da177e4SLinus Torvalds } 7841da177e4SLinus Torvalds 7851da177e4SLinus Torvalds void set_task_stack_end_magic(struct task_struct *tsk) 7861da177e4SLinus Torvalds { 7871da177e4SLinus Torvalds unsigned long *stackend; 7881da177e4SLinus Torvalds 7891da177e4SLinus Torvalds stackend = end_of_stack(tsk); 7901da177e4SLinus Torvalds *stackend = STACK_END_MAGIC; /* for overflow detection */ 7911da177e4SLinus Torvalds } 7921da177e4SLinus Torvalds 7931da177e4SLinus Torvalds static struct task_struct *dup_task_struct(struct task_struct *orig, int node) 7941da177e4SLinus Torvalds { 7951da177e4SLinus Torvalds struct task_struct *tsk; 7961da177e4SLinus Torvalds unsigned long *stack; 7971da177e4SLinus Torvalds struct vm_struct *stack_vm_area; 7981da177e4SLinus Torvalds int err; 7991da177e4SLinus Torvalds 8001da177e4SLinus Torvalds if (node == NUMA_NO_NODE) 8011da177e4SLinus Torvalds node = tsk_fork_get_node(orig); 8021da177e4SLinus Torvalds tsk = alloc_task_struct_node(node); 8031da177e4SLinus Torvalds if (!tsk) 8041da177e4SLinus Torvalds return NULL; 8051da177e4SLinus Torvalds 8061da177e4SLinus Torvalds stack = alloc_thread_stack_node(tsk, node); 8071da177e4SLinus Torvalds if (!stack) 8081da177e4SLinus Torvalds goto free_tsk; 8091da177e4SLinus Torvalds 8101da177e4SLinus Torvalds stack_vm_area = task_stack_vm_area(tsk); 8111da177e4SLinus Torvalds 8121da177e4SLinus Torvalds err = arch_dup_task_struct(tsk, orig); 8131da177e4SLinus Torvalds 8141da177e4SLinus Torvalds /* 8151da177e4SLinus Torvalds * arch_dup_task_struct() clobbers the stack-related fields. Make 8161da177e4SLinus Torvalds * sure they're properly initialized before using any stack-related 8171da177e4SLinus Torvalds * functions again. 8181da177e4SLinus Torvalds */ 8191da177e4SLinus Torvalds tsk->stack = stack; 8201da177e4SLinus Torvalds #ifdef CONFIG_VMAP_STACK 8211da177e4SLinus Torvalds tsk->stack_vm_area = stack_vm_area; 8221da177e4SLinus Torvalds #endif 8231da177e4SLinus Torvalds #ifdef CONFIG_THREAD_INFO_IN_TASK 8241da177e4SLinus Torvalds atomic_set(&tsk->stack_refcount, 1); 8251da177e4SLinus Torvalds #endif 8261da177e4SLinus Torvalds 8271da177e4SLinus Torvalds if (err) 8281da177e4SLinus Torvalds goto free_stack; 8291da177e4SLinus Torvalds 8301da177e4SLinus Torvalds #ifdef CONFIG_SECCOMP 8311da177e4SLinus Torvalds /* 8321da177e4SLinus Torvalds * We must handle setting up seccomp filters once we're under 8331da177e4SLinus Torvalds * the sighand lock in case orig has changed between now and 8341da177e4SLinus Torvalds * then. Until then, filter must be NULL to avoid messing up 8351da177e4SLinus Torvalds * the usage counts on the error path calling free_task. 8361da177e4SLinus Torvalds */ 8371da177e4SLinus Torvalds tsk->seccomp.filter = NULL; 8381da177e4SLinus Torvalds #endif 8391da177e4SLinus Torvalds 8401da177e4SLinus Torvalds setup_thread_stack(tsk, orig); 8411da177e4SLinus Torvalds clear_user_return_notifier(tsk); 8421da177e4SLinus Torvalds clear_tsk_need_resched(tsk); 8431da177e4SLinus Torvalds set_task_stack_end_magic(tsk); 8441da177e4SLinus Torvalds 845050e9baaSLinus Torvalds #ifdef CONFIG_STACKPROTECTOR 8461da177e4SLinus Torvalds tsk->stack_canary = get_random_canary(); 8471da177e4SLinus Torvalds #endif 8481da177e4SLinus Torvalds 8491da177e4SLinus Torvalds /* 8501da177e4SLinus Torvalds * One for us, one for whoever does the "release_task()" (usually 8511da177e4SLinus Torvalds * parent) 8521da177e4SLinus Torvalds */ 8531da177e4SLinus Torvalds atomic_set(&tsk->usage, 2); 8541da177e4SLinus Torvalds #ifdef CONFIG_BLK_DEV_IO_TRACE 8551da177e4SLinus Torvalds tsk->btrace_seq = 0; 8561da177e4SLinus Torvalds #endif 8571da177e4SLinus Torvalds tsk->splice_pipe = NULL; 8581da177e4SLinus Torvalds tsk->task_frag.page = NULL; 8591da177e4SLinus Torvalds tsk->wake_q.next = NULL; 8601da177e4SLinus Torvalds 8611da177e4SLinus Torvalds account_kernel_stack(tsk, 1); 8621da177e4SLinus Torvalds 8631da177e4SLinus Torvalds kcov_task_init(tsk); 8641da177e4SLinus Torvalds 8651da177e4SLinus Torvalds #ifdef CONFIG_FAULT_INJECTION 8661da177e4SLinus Torvalds tsk->fail_nth = 0; 8671da177e4SLinus Torvalds #endif 8681da177e4SLinus Torvalds 8691da177e4SLinus Torvalds return tsk; 8701da177e4SLinus Torvalds 8711da177e4SLinus Torvalds free_stack: 8721da177e4SLinus Torvalds free_thread_stack(tsk); 8731da177e4SLinus Torvalds free_tsk: 8741da177e4SLinus Torvalds free_task_struct(tsk); 8751da177e4SLinus Torvalds return NULL; 8761da177e4SLinus Torvalds } 8771da177e4SLinus Torvalds 8781da177e4SLinus Torvalds __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); 8791da177e4SLinus Torvalds 8804cb0e11bSHidehiro Kawai static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; 8814cb0e11bSHidehiro Kawai 8824cb0e11bSHidehiro Kawai static int __init coredump_filter_setup(char *s) 8834cb0e11bSHidehiro Kawai { 8844cb0e11bSHidehiro Kawai default_dump_filter = 8854cb0e11bSHidehiro Kawai (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & 8864cb0e11bSHidehiro Kawai MMF_DUMP_FILTER_MASK; 8874cb0e11bSHidehiro Kawai return 1; 8884cb0e11bSHidehiro Kawai } 8894cb0e11bSHidehiro Kawai 8904cb0e11bSHidehiro Kawai __setup("coredump_filter=", coredump_filter_setup); 8914cb0e11bSHidehiro Kawai 8921da177e4SLinus Torvalds #include <linux/init_task.h> 8931da177e4SLinus Torvalds 894858f0993SAlexey Dobriyan static void mm_init_aio(struct mm_struct *mm) 895858f0993SAlexey Dobriyan { 896858f0993SAlexey Dobriyan #ifdef CONFIG_AIO 897858f0993SAlexey Dobriyan spin_lock_init(&mm->ioctx_lock); 898db446a08SBenjamin LaHaise mm->ioctx_table = NULL; 899858f0993SAlexey Dobriyan #endif 900858f0993SAlexey Dobriyan } 901858f0993SAlexey Dobriyan 90233144e84SVladimir Davydov static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) 90333144e84SVladimir Davydov { 90433144e84SVladimir Davydov #ifdef CONFIG_MEMCG 90533144e84SVladimir Davydov mm->owner = p; 90633144e84SVladimir Davydov #endif 90733144e84SVladimir Davydov } 90833144e84SVladimir Davydov 909355627f5SEric Biggers static void mm_init_uprobes_state(struct mm_struct *mm) 910355627f5SEric Biggers { 911355627f5SEric Biggers #ifdef CONFIG_UPROBES 912355627f5SEric Biggers mm->uprobes_state.xol_area = NULL; 913355627f5SEric Biggers #endif 914355627f5SEric Biggers } 915355627f5SEric Biggers 916bfedb589SEric W. Biederman static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, 917bfedb589SEric W. Biederman struct user_namespace *user_ns) 9181da177e4SLinus Torvalds { 91941f727fdSVladimir Davydov mm->mmap = NULL; 92041f727fdSVladimir Davydov mm->mm_rb = RB_ROOT; 92141f727fdSVladimir Davydov mm->vmacache_seqnum = 0; 9221da177e4SLinus Torvalds atomic_set(&mm->mm_users, 1); 9231da177e4SLinus Torvalds atomic_set(&mm->mm_count, 1); 9241da177e4SLinus Torvalds init_rwsem(&mm->mmap_sem); 9251da177e4SLinus Torvalds INIT_LIST_HEAD(&mm->mmlist); 926999d9fc1SOleg Nesterov mm->core_state = NULL; 927af5b0f6aSKirill A. Shutemov mm_pgtables_bytes_init(mm); 92841f727fdSVladimir Davydov mm->map_count = 0; 92941f727fdSVladimir Davydov mm->locked_vm = 0; 930ce65cefaSVladimir Davydov mm->pinned_vm = 0; 931d559db08SKAMEZAWA Hiroyuki memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); 9321da177e4SLinus Torvalds spin_lock_init(&mm->page_table_lock); 93388aa7cc6SYang Shi spin_lock_init(&mm->arg_lock); 93441f727fdSVladimir Davydov mm_init_cpumask(mm); 935858f0993SAlexey Dobriyan mm_init_aio(mm); 936cf475ad2SBalbir Singh mm_init_owner(mm, p); 9372b7e8665SEric Biggers RCU_INIT_POINTER(mm->exe_file, NULL); 93841f727fdSVladimir Davydov mmu_notifier_mm_init(mm); 939133ff0eaSJérôme Glisse hmm_mm_init(mm); 94016af97dcSNadav Amit init_tlb_flush_pending(mm); 94141f727fdSVladimir Davydov #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 94241f727fdSVladimir Davydov mm->pmd_huge_pte = NULL; 94341f727fdSVladimir Davydov #endif 944355627f5SEric Biggers mm_init_uprobes_state(mm); 9451da177e4SLinus Torvalds 946a0715cc2SAlex Thorlton if (current->mm) { 947a0715cc2SAlex Thorlton mm->flags = current->mm->flags & MMF_INIT_MASK; 948a0715cc2SAlex Thorlton mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; 949a0715cc2SAlex Thorlton } else { 950a0715cc2SAlex Thorlton mm->flags = default_dump_filter; 9511da177e4SLinus Torvalds mm->def_flags = 0; 952a0715cc2SAlex Thorlton } 953a0715cc2SAlex Thorlton 95441f727fdSVladimir Davydov if (mm_alloc_pgd(mm)) 95541f727fdSVladimir Davydov goto fail_nopgd; 95678fb7466SPavel Emelianov 95741f727fdSVladimir Davydov if (init_new_context(p, mm)) 95841f727fdSVladimir Davydov goto fail_nocontext; 95941f727fdSVladimir Davydov 960bfedb589SEric W. Biederman mm->user_ns = get_user_ns(user_ns); 96141f727fdSVladimir Davydov return mm; 96241f727fdSVladimir Davydov 96341f727fdSVladimir Davydov fail_nocontext: 96441f727fdSVladimir Davydov mm_free_pgd(mm); 96541f727fdSVladimir Davydov fail_nopgd: 9661da177e4SLinus Torvalds free_mm(mm); 9671da177e4SLinus Torvalds return NULL; 9681da177e4SLinus Torvalds } 9691da177e4SLinus Torvalds 9701da177e4SLinus Torvalds /* 9711da177e4SLinus Torvalds * Allocate and initialize an mm_struct. 9721da177e4SLinus Torvalds */ 9731da177e4SLinus Torvalds struct mm_struct *mm_alloc(void) 9741da177e4SLinus Torvalds { 9751da177e4SLinus Torvalds struct mm_struct *mm; 9761da177e4SLinus Torvalds 9771da177e4SLinus Torvalds mm = allocate_mm(); 978de03c72cSKOSAKI Motohiro if (!mm) 979de03c72cSKOSAKI Motohiro return NULL; 980de03c72cSKOSAKI Motohiro 9811da177e4SLinus Torvalds memset(mm, 0, sizeof(*mm)); 982bfedb589SEric W. Biederman return mm_init(mm, current, current_user_ns()); 9831da177e4SLinus Torvalds } 9841da177e4SLinus Torvalds 985ec8d7c14SMichal Hocko static inline void __mmput(struct mm_struct *mm) 9861da177e4SLinus Torvalds { 987ec8d7c14SMichal Hocko VM_BUG_ON(atomic_read(&mm->mm_users)); 9880ae26f1bSAndrew Morton 989d4b3b638SSrikar Dronamraju uprobe_clear_state(mm); 9901da177e4SLinus Torvalds exit_aio(mm); 9911c2fb7a4SAndrea Arcangeli ksm_exit(mm); 992ba76149fSAndrea Arcangeli khugepaged_exit(mm); /* must run before exit_mmap */ 9931da177e4SLinus Torvalds exit_mmap(mm); 9946fcb52a5SAaron Lu mm_put_huge_zero_page(mm); 995925d1c40SMatt Helsley set_mm_exe_file(mm, NULL); 9961da177e4SLinus Torvalds if (!list_empty(&mm->mmlist)) { 9971da177e4SLinus Torvalds spin_lock(&mmlist_lock); 9981da177e4SLinus Torvalds list_del(&mm->mmlist); 9991da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 10001da177e4SLinus Torvalds } 1001801460d0SHiroshi Shimamoto if (mm->binfmt) 1002801460d0SHiroshi Shimamoto module_put(mm->binfmt->module); 10031da177e4SLinus Torvalds mmdrop(mm); 10041da177e4SLinus Torvalds } 1005ec8d7c14SMichal Hocko 1006ec8d7c14SMichal Hocko /* 1007ec8d7c14SMichal Hocko * Decrement the use count and release all resources for an mm. 1008ec8d7c14SMichal Hocko */ 1009ec8d7c14SMichal Hocko void mmput(struct mm_struct *mm) 1010ec8d7c14SMichal Hocko { 1011ec8d7c14SMichal Hocko might_sleep(); 1012ec8d7c14SMichal Hocko 1013ec8d7c14SMichal Hocko if (atomic_dec_and_test(&mm->mm_users)) 1014ec8d7c14SMichal Hocko __mmput(mm); 10151da177e4SLinus Torvalds } 10161da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(mmput); 10171da177e4SLinus Torvalds 1018a1b2289cSSherry Yang #ifdef CONFIG_MMU 1019a1b2289cSSherry Yang static void mmput_async_fn(struct work_struct *work) 1020a1b2289cSSherry Yang { 1021a1b2289cSSherry Yang struct mm_struct *mm = container_of(work, struct mm_struct, 1022a1b2289cSSherry Yang async_put_work); 1023a1b2289cSSherry Yang 1024a1b2289cSSherry Yang __mmput(mm); 1025a1b2289cSSherry Yang } 1026a1b2289cSSherry Yang 1027a1b2289cSSherry Yang void mmput_async(struct mm_struct *mm) 1028a1b2289cSSherry Yang { 1029a1b2289cSSherry Yang if (atomic_dec_and_test(&mm->mm_users)) { 1030a1b2289cSSherry Yang INIT_WORK(&mm->async_put_work, mmput_async_fn); 1031a1b2289cSSherry Yang schedule_work(&mm->async_put_work); 1032a1b2289cSSherry Yang } 1033a1b2289cSSherry Yang } 1034a1b2289cSSherry Yang #endif 1035a1b2289cSSherry Yang 103690f31d0eSKonstantin Khlebnikov /** 103790f31d0eSKonstantin Khlebnikov * set_mm_exe_file - change a reference to the mm's executable file 103890f31d0eSKonstantin Khlebnikov * 103990f31d0eSKonstantin Khlebnikov * This changes mm's executable file (shown as symlink /proc/[pid]/exe). 104090f31d0eSKonstantin Khlebnikov * 10416e399cd1SDavidlohr Bueso * Main users are mmput() and sys_execve(). Callers prevent concurrent 10426e399cd1SDavidlohr Bueso * invocations: in mmput() nobody alive left, in execve task is single 10436e399cd1SDavidlohr Bueso * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the 10446e399cd1SDavidlohr Bueso * mm->exe_file, but does so without using set_mm_exe_file() in order 10456e399cd1SDavidlohr Bueso * to do avoid the need for any locks. 104690f31d0eSKonstantin Khlebnikov */ 104738646013SJiri Slaby void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) 104838646013SJiri Slaby { 10496e399cd1SDavidlohr Bueso struct file *old_exe_file; 10506e399cd1SDavidlohr Bueso 10516e399cd1SDavidlohr Bueso /* 10526e399cd1SDavidlohr Bueso * It is safe to dereference the exe_file without RCU as 10536e399cd1SDavidlohr Bueso * this function is only called if nobody else can access 10546e399cd1SDavidlohr Bueso * this mm -- see comment above for justification. 10556e399cd1SDavidlohr Bueso */ 10566e399cd1SDavidlohr Bueso old_exe_file = rcu_dereference_raw(mm->exe_file); 105790f31d0eSKonstantin Khlebnikov 105838646013SJiri Slaby if (new_exe_file) 105938646013SJiri Slaby get_file(new_exe_file); 106090f31d0eSKonstantin Khlebnikov rcu_assign_pointer(mm->exe_file, new_exe_file); 106190f31d0eSKonstantin Khlebnikov if (old_exe_file) 106290f31d0eSKonstantin Khlebnikov fput(old_exe_file); 106338646013SJiri Slaby } 106438646013SJiri Slaby 106590f31d0eSKonstantin Khlebnikov /** 106690f31d0eSKonstantin Khlebnikov * get_mm_exe_file - acquire a reference to the mm's executable file 106790f31d0eSKonstantin Khlebnikov * 106890f31d0eSKonstantin Khlebnikov * Returns %NULL if mm has no associated executable file. 106990f31d0eSKonstantin Khlebnikov * User must release file via fput(). 107090f31d0eSKonstantin Khlebnikov */ 107138646013SJiri Slaby struct file *get_mm_exe_file(struct mm_struct *mm) 107238646013SJiri Slaby { 107338646013SJiri Slaby struct file *exe_file; 107438646013SJiri Slaby 107590f31d0eSKonstantin Khlebnikov rcu_read_lock(); 107690f31d0eSKonstantin Khlebnikov exe_file = rcu_dereference(mm->exe_file); 107790f31d0eSKonstantin Khlebnikov if (exe_file && !get_file_rcu(exe_file)) 107890f31d0eSKonstantin Khlebnikov exe_file = NULL; 107990f31d0eSKonstantin Khlebnikov rcu_read_unlock(); 108038646013SJiri Slaby return exe_file; 108138646013SJiri Slaby } 108211163348SDavidlohr Bueso EXPORT_SYMBOL(get_mm_exe_file); 108338646013SJiri Slaby 10841da177e4SLinus Torvalds /** 1085cd81a917SMateusz Guzik * get_task_exe_file - acquire a reference to the task's executable file 1086cd81a917SMateusz Guzik * 1087cd81a917SMateusz Guzik * Returns %NULL if task's mm (if any) has no associated executable file or 1088cd81a917SMateusz Guzik * this is a kernel thread with borrowed mm (see the comment above get_task_mm). 1089cd81a917SMateusz Guzik * User must release file via fput(). 1090cd81a917SMateusz Guzik */ 1091cd81a917SMateusz Guzik struct file *get_task_exe_file(struct task_struct *task) 1092cd81a917SMateusz Guzik { 1093cd81a917SMateusz Guzik struct file *exe_file = NULL; 1094cd81a917SMateusz Guzik struct mm_struct *mm; 1095cd81a917SMateusz Guzik 1096cd81a917SMateusz Guzik task_lock(task); 1097cd81a917SMateusz Guzik mm = task->mm; 1098cd81a917SMateusz Guzik if (mm) { 1099cd81a917SMateusz Guzik if (!(task->flags & PF_KTHREAD)) 1100cd81a917SMateusz Guzik exe_file = get_mm_exe_file(mm); 1101cd81a917SMateusz Guzik } 1102cd81a917SMateusz Guzik task_unlock(task); 1103cd81a917SMateusz Guzik return exe_file; 1104cd81a917SMateusz Guzik } 1105cd81a917SMateusz Guzik EXPORT_SYMBOL(get_task_exe_file); 1106cd81a917SMateusz Guzik 1107cd81a917SMateusz Guzik /** 11081da177e4SLinus Torvalds * get_task_mm - acquire a reference to the task's mm 11091da177e4SLinus Torvalds * 1110246bb0b1SOleg Nesterov * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning 11111da177e4SLinus Torvalds * this kernel workthread has transiently adopted a user mm with use_mm, 11121da177e4SLinus Torvalds * to do its AIO) is not set and if so returns a reference to it, after 11131da177e4SLinus Torvalds * bumping up the use count. User must release the mm via mmput() 11141da177e4SLinus Torvalds * after use. Typically used by /proc and ptrace. 11151da177e4SLinus Torvalds */ 11161da177e4SLinus Torvalds struct mm_struct *get_task_mm(struct task_struct *task) 11171da177e4SLinus Torvalds { 11181da177e4SLinus Torvalds struct mm_struct *mm; 11191da177e4SLinus Torvalds 11201da177e4SLinus Torvalds task_lock(task); 11211da177e4SLinus Torvalds mm = task->mm; 11221da177e4SLinus Torvalds if (mm) { 1123246bb0b1SOleg Nesterov if (task->flags & PF_KTHREAD) 11241da177e4SLinus Torvalds mm = NULL; 11251da177e4SLinus Torvalds else 11263fce371bSVegard Nossum mmget(mm); 11271da177e4SLinus Torvalds } 11281da177e4SLinus Torvalds task_unlock(task); 11291da177e4SLinus Torvalds return mm; 11301da177e4SLinus Torvalds } 11311da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(get_task_mm); 11321da177e4SLinus Torvalds 11338cdb878dSChristopher Yeoh struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) 11348cdb878dSChristopher Yeoh { 11358cdb878dSChristopher Yeoh struct mm_struct *mm; 11368cdb878dSChristopher Yeoh int err; 11378cdb878dSChristopher Yeoh 11388cdb878dSChristopher Yeoh err = mutex_lock_killable(&task->signal->cred_guard_mutex); 11398cdb878dSChristopher Yeoh if (err) 11408cdb878dSChristopher Yeoh return ERR_PTR(err); 11418cdb878dSChristopher Yeoh 11428cdb878dSChristopher Yeoh mm = get_task_mm(task); 11438cdb878dSChristopher Yeoh if (mm && mm != current->mm && 11448cdb878dSChristopher Yeoh !ptrace_may_access(task, mode)) { 11458cdb878dSChristopher Yeoh mmput(mm); 11468cdb878dSChristopher Yeoh mm = ERR_PTR(-EACCES); 11478cdb878dSChristopher Yeoh } 11488cdb878dSChristopher Yeoh mutex_unlock(&task->signal->cred_guard_mutex); 11498cdb878dSChristopher Yeoh 11508cdb878dSChristopher Yeoh return mm; 11518cdb878dSChristopher Yeoh } 11528cdb878dSChristopher Yeoh 115357b59c4aSOleg Nesterov static void complete_vfork_done(struct task_struct *tsk) 1154c415c3b4SOleg Nesterov { 1155d68b46feSOleg Nesterov struct completion *vfork; 1156c415c3b4SOleg Nesterov 1157d68b46feSOleg Nesterov task_lock(tsk); 1158d68b46feSOleg Nesterov vfork = tsk->vfork_done; 1159d68b46feSOleg Nesterov if (likely(vfork)) { 1160c415c3b4SOleg Nesterov tsk->vfork_done = NULL; 1161d68b46feSOleg Nesterov complete(vfork); 1162d68b46feSOleg Nesterov } 1163d68b46feSOleg Nesterov task_unlock(tsk); 1164d68b46feSOleg Nesterov } 1165d68b46feSOleg Nesterov 1166d68b46feSOleg Nesterov static int wait_for_vfork_done(struct task_struct *child, 1167d68b46feSOleg Nesterov struct completion *vfork) 1168d68b46feSOleg Nesterov { 1169d68b46feSOleg Nesterov int killed; 1170d68b46feSOleg Nesterov 1171d68b46feSOleg Nesterov freezer_do_not_count(); 1172d68b46feSOleg Nesterov killed = wait_for_completion_killable(vfork); 1173d68b46feSOleg Nesterov freezer_count(); 1174d68b46feSOleg Nesterov 1175d68b46feSOleg Nesterov if (killed) { 1176d68b46feSOleg Nesterov task_lock(child); 1177d68b46feSOleg Nesterov child->vfork_done = NULL; 1178d68b46feSOleg Nesterov task_unlock(child); 1179d68b46feSOleg Nesterov } 1180d68b46feSOleg Nesterov 1181d68b46feSOleg Nesterov put_task_struct(child); 1182d68b46feSOleg Nesterov return killed; 1183c415c3b4SOleg Nesterov } 1184c415c3b4SOleg Nesterov 11851da177e4SLinus Torvalds /* Please note the differences between mmput and mm_release. 11861da177e4SLinus Torvalds * mmput is called whenever we stop holding onto a mm_struct, 11871da177e4SLinus Torvalds * error success whatever. 11881da177e4SLinus Torvalds * 11891da177e4SLinus Torvalds * mm_release is called after a mm_struct has been removed 11901da177e4SLinus Torvalds * from the current process. 11911da177e4SLinus Torvalds * 11921da177e4SLinus Torvalds * This difference is important for error handling, when we 11931da177e4SLinus Torvalds * only half set up a mm_struct for a new process and need to restore 11941da177e4SLinus Torvalds * the old one. Because we mmput the new mm_struct before 11951da177e4SLinus Torvalds * restoring the old one. . . 11961da177e4SLinus Torvalds * Eric Biederman 10 January 1998 11971da177e4SLinus Torvalds */ 11981da177e4SLinus Torvalds void mm_release(struct task_struct *tsk, struct mm_struct *mm) 11991da177e4SLinus Torvalds { 12008141c7f3SLinus Torvalds /* Get rid of any futexes when releasing the mm */ 12018141c7f3SLinus Torvalds #ifdef CONFIG_FUTEX 1202fc6b177dSPeter Zijlstra if (unlikely(tsk->robust_list)) { 12038141c7f3SLinus Torvalds exit_robust_list(tsk); 1204fc6b177dSPeter Zijlstra tsk->robust_list = NULL; 1205fc6b177dSPeter Zijlstra } 12068141c7f3SLinus Torvalds #ifdef CONFIG_COMPAT 1207fc6b177dSPeter Zijlstra if (unlikely(tsk->compat_robust_list)) { 12088141c7f3SLinus Torvalds compat_exit_robust_list(tsk); 1209fc6b177dSPeter Zijlstra tsk->compat_robust_list = NULL; 1210fc6b177dSPeter Zijlstra } 12118141c7f3SLinus Torvalds #endif 1212322a2c10SThomas Gleixner if (unlikely(!list_empty(&tsk->pi_state_list))) 1213322a2c10SThomas Gleixner exit_pi_state_list(tsk); 12148141c7f3SLinus Torvalds #endif 12158141c7f3SLinus Torvalds 12160326f5a9SSrikar Dronamraju uprobe_free_utask(tsk); 12170326f5a9SSrikar Dronamraju 12181da177e4SLinus Torvalds /* Get rid of any cached register state */ 12191da177e4SLinus Torvalds deactivate_mm(tsk, mm); 12201da177e4SLinus Torvalds 1221fec1d011SRoland McGrath /* 1222735f2770SMichal Hocko * Signal userspace if we're not exiting with a core dump 1223735f2770SMichal Hocko * because we want to leave the value intact for debugging 1224735f2770SMichal Hocko * purposes. 1225fec1d011SRoland McGrath */ 12269c8a8228SEric Dumazet if (tsk->clear_child_tid) { 1227735f2770SMichal Hocko if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && 12289c8a8228SEric Dumazet atomic_read(&mm->mm_users) > 1) { 12291da177e4SLinus Torvalds /* 12301da177e4SLinus Torvalds * We don't check the error code - if userspace has 12311da177e4SLinus Torvalds * not set up a proper pointer then tough luck. 12321da177e4SLinus Torvalds */ 12339c8a8228SEric Dumazet put_user(0, tsk->clear_child_tid); 12342de0db99SDominik Brodowski do_futex(tsk->clear_child_tid, FUTEX_WAKE, 12352de0db99SDominik Brodowski 1, NULL, NULL, 0, 0); 12369c8a8228SEric Dumazet } 12379c8a8228SEric Dumazet tsk->clear_child_tid = NULL; 12381da177e4SLinus Torvalds } 1239f7505d64SKonstantin Khlebnikov 1240f7505d64SKonstantin Khlebnikov /* 1241f7505d64SKonstantin Khlebnikov * All done, finally we can wake up parent and return this mm to him. 1242f7505d64SKonstantin Khlebnikov * Also kthread_stop() uses this completion for synchronization. 1243f7505d64SKonstantin Khlebnikov */ 1244f7505d64SKonstantin Khlebnikov if (tsk->vfork_done) 1245f7505d64SKonstantin Khlebnikov complete_vfork_done(tsk); 12461da177e4SLinus Torvalds } 12471da177e4SLinus Torvalds 1248a0a7ec30SJANAK DESAI /* 1249a0a7ec30SJANAK DESAI * Allocate a new mm structure and copy contents from the 1250a0a7ec30SJANAK DESAI * mm structure of the passed in task structure. 1251a0a7ec30SJANAK DESAI */ 1252ff252c1fSDaeSeok Youn static struct mm_struct *dup_mm(struct task_struct *tsk) 1253a0a7ec30SJANAK DESAI { 1254a0a7ec30SJANAK DESAI struct mm_struct *mm, *oldmm = current->mm; 1255a0a7ec30SJANAK DESAI int err; 1256a0a7ec30SJANAK DESAI 1257a0a7ec30SJANAK DESAI mm = allocate_mm(); 1258a0a7ec30SJANAK DESAI if (!mm) 1259a0a7ec30SJANAK DESAI goto fail_nomem; 1260a0a7ec30SJANAK DESAI 1261a0a7ec30SJANAK DESAI memcpy(mm, oldmm, sizeof(*mm)); 1262a0a7ec30SJANAK DESAI 1263bfedb589SEric W. Biederman if (!mm_init(mm, tsk, mm->user_ns)) 1264a0a7ec30SJANAK DESAI goto fail_nomem; 1265a0a7ec30SJANAK DESAI 1266a0a7ec30SJANAK DESAI err = dup_mmap(mm, oldmm); 1267a0a7ec30SJANAK DESAI if (err) 1268a0a7ec30SJANAK DESAI goto free_pt; 1269a0a7ec30SJANAK DESAI 1270a0a7ec30SJANAK DESAI mm->hiwater_rss = get_mm_rss(mm); 1271a0a7ec30SJANAK DESAI mm->hiwater_vm = mm->total_vm; 1272a0a7ec30SJANAK DESAI 1273801460d0SHiroshi Shimamoto if (mm->binfmt && !try_module_get(mm->binfmt->module)) 1274801460d0SHiroshi Shimamoto goto free_pt; 1275801460d0SHiroshi Shimamoto 1276a0a7ec30SJANAK DESAI return mm; 1277a0a7ec30SJANAK DESAI 1278a0a7ec30SJANAK DESAI free_pt: 1279801460d0SHiroshi Shimamoto /* don't put binfmt in mmput, we haven't got module yet */ 1280801460d0SHiroshi Shimamoto mm->binfmt = NULL; 1281a0a7ec30SJANAK DESAI mmput(mm); 1282a0a7ec30SJANAK DESAI 1283a0a7ec30SJANAK DESAI fail_nomem: 1284a0a7ec30SJANAK DESAI return NULL; 1285a0a7ec30SJANAK DESAI } 1286a0a7ec30SJANAK DESAI 12871da177e4SLinus Torvalds static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) 12881da177e4SLinus Torvalds { 12891da177e4SLinus Torvalds struct mm_struct *mm, *oldmm; 12901da177e4SLinus Torvalds int retval; 12911da177e4SLinus Torvalds 12921da177e4SLinus Torvalds tsk->min_flt = tsk->maj_flt = 0; 12931da177e4SLinus Torvalds tsk->nvcsw = tsk->nivcsw = 0; 129417406b82SMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK 129517406b82SMandeep Singh Baines tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; 129617406b82SMandeep Singh Baines #endif 12971da177e4SLinus Torvalds 12981da177e4SLinus Torvalds tsk->mm = NULL; 12991da177e4SLinus Torvalds tsk->active_mm = NULL; 13001da177e4SLinus Torvalds 13011da177e4SLinus Torvalds /* 13021da177e4SLinus Torvalds * Are we cloning a kernel thread? 13031da177e4SLinus Torvalds * 13041da177e4SLinus Torvalds * We need to steal a active VM for that.. 13051da177e4SLinus Torvalds */ 13061da177e4SLinus Torvalds oldmm = current->mm; 13071da177e4SLinus Torvalds if (!oldmm) 13081da177e4SLinus Torvalds return 0; 13091da177e4SLinus Torvalds 1310615d6e87SDavidlohr Bueso /* initialize the new vmacache entries */ 1311615d6e87SDavidlohr Bueso vmacache_flush(tsk); 1312615d6e87SDavidlohr Bueso 13131da177e4SLinus Torvalds if (clone_flags & CLONE_VM) { 13143fce371bSVegard Nossum mmget(oldmm); 13151da177e4SLinus Torvalds mm = oldmm; 13161da177e4SLinus Torvalds goto good_mm; 13171da177e4SLinus Torvalds } 13181da177e4SLinus Torvalds 13191da177e4SLinus Torvalds retval = -ENOMEM; 1320a0a7ec30SJANAK DESAI mm = dup_mm(tsk); 13211da177e4SLinus Torvalds if (!mm) 13221da177e4SLinus Torvalds goto fail_nomem; 13231da177e4SLinus Torvalds 13241da177e4SLinus Torvalds good_mm: 13251da177e4SLinus Torvalds tsk->mm = mm; 13261da177e4SLinus Torvalds tsk->active_mm = mm; 13271da177e4SLinus Torvalds return 0; 13281da177e4SLinus Torvalds 13291da177e4SLinus Torvalds fail_nomem: 13301da177e4SLinus Torvalds return retval; 13311da177e4SLinus Torvalds } 13321da177e4SLinus Torvalds 1333a39bc516SAlexey Dobriyan static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) 13341da177e4SLinus Torvalds { 1335498052bbSAl Viro struct fs_struct *fs = current->fs; 13361da177e4SLinus Torvalds if (clone_flags & CLONE_FS) { 1337498052bbSAl Viro /* tsk->fs is already what we want */ 13382a4419b5SNick Piggin spin_lock(&fs->lock); 1339498052bbSAl Viro if (fs->in_exec) { 13402a4419b5SNick Piggin spin_unlock(&fs->lock); 1341498052bbSAl Viro return -EAGAIN; 1342498052bbSAl Viro } 1343498052bbSAl Viro fs->users++; 13442a4419b5SNick Piggin spin_unlock(&fs->lock); 13451da177e4SLinus Torvalds return 0; 13461da177e4SLinus Torvalds } 1347498052bbSAl Viro tsk->fs = copy_fs_struct(fs); 13481da177e4SLinus Torvalds if (!tsk->fs) 13491da177e4SLinus Torvalds return -ENOMEM; 13501da177e4SLinus Torvalds return 0; 13511da177e4SLinus Torvalds } 13521da177e4SLinus Torvalds 1353a016f338SJANAK DESAI static int copy_files(unsigned long clone_flags, struct task_struct *tsk) 1354a016f338SJANAK DESAI { 1355a016f338SJANAK DESAI struct files_struct *oldf, *newf; 1356a016f338SJANAK DESAI int error = 0; 1357a016f338SJANAK DESAI 1358a016f338SJANAK DESAI /* 1359a016f338SJANAK DESAI * A background process may not have any files ... 1360a016f338SJANAK DESAI */ 1361a016f338SJANAK DESAI oldf = current->files; 1362a016f338SJANAK DESAI if (!oldf) 1363a016f338SJANAK DESAI goto out; 1364a016f338SJANAK DESAI 1365a016f338SJANAK DESAI if (clone_flags & CLONE_FILES) { 1366a016f338SJANAK DESAI atomic_inc(&oldf->count); 1367a016f338SJANAK DESAI goto out; 1368a016f338SJANAK DESAI } 1369a016f338SJANAK DESAI 1370a016f338SJANAK DESAI newf = dup_fd(oldf, &error); 1371a016f338SJANAK DESAI if (!newf) 1372a016f338SJANAK DESAI goto out; 1373a016f338SJANAK DESAI 1374a016f338SJANAK DESAI tsk->files = newf; 1375a016f338SJANAK DESAI error = 0; 1376a016f338SJANAK DESAI out: 1377a016f338SJANAK DESAI return error; 1378a016f338SJANAK DESAI } 1379a016f338SJANAK DESAI 1380fadad878SJens Axboe static int copy_io(unsigned long clone_flags, struct task_struct *tsk) 1381fd0928dfSJens Axboe { 1382fd0928dfSJens Axboe #ifdef CONFIG_BLOCK 1383fd0928dfSJens Axboe struct io_context *ioc = current->io_context; 13846e736be7STejun Heo struct io_context *new_ioc; 1385fd0928dfSJens Axboe 1386fd0928dfSJens Axboe if (!ioc) 1387fd0928dfSJens Axboe return 0; 1388fadad878SJens Axboe /* 1389fadad878SJens Axboe * Share io context with parent, if CLONE_IO is set 1390fadad878SJens Axboe */ 1391fadad878SJens Axboe if (clone_flags & CLONE_IO) { 13923d48749dSTejun Heo ioc_task_link(ioc); 13933d48749dSTejun Heo tsk->io_context = ioc; 1394fadad878SJens Axboe } else if (ioprio_valid(ioc->ioprio)) { 13956e736be7STejun Heo new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE); 13966e736be7STejun Heo if (unlikely(!new_ioc)) 1397fd0928dfSJens Axboe return -ENOMEM; 1398fd0928dfSJens Axboe 13996e736be7STejun Heo new_ioc->ioprio = ioc->ioprio; 140011a3122fSTejun Heo put_io_context(new_ioc); 1401fd0928dfSJens Axboe } 1402fd0928dfSJens Axboe #endif 1403fd0928dfSJens Axboe return 0; 1404fd0928dfSJens Axboe } 1405fd0928dfSJens Axboe 1406a39bc516SAlexey Dobriyan static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) 14071da177e4SLinus Torvalds { 14081da177e4SLinus Torvalds struct sighand_struct *sig; 14091da177e4SLinus Torvalds 141060348802SZhaolei if (clone_flags & CLONE_SIGHAND) { 14111da177e4SLinus Torvalds atomic_inc(¤t->sighand->count); 14121da177e4SLinus Torvalds return 0; 14131da177e4SLinus Torvalds } 14141da177e4SLinus Torvalds sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); 1415e56d0903SIngo Molnar rcu_assign_pointer(tsk->sighand, sig); 14161da177e4SLinus Torvalds if (!sig) 14171da177e4SLinus Torvalds return -ENOMEM; 14189d7fb042SPeter Zijlstra 14191da177e4SLinus Torvalds atomic_set(&sig->count, 1); 14201da177e4SLinus Torvalds memcpy(sig->action, current->sighand->action, sizeof(sig->action)); 14211da177e4SLinus Torvalds return 0; 14221da177e4SLinus Torvalds } 14231da177e4SLinus Torvalds 1424a7e5328aSOleg Nesterov void __cleanup_sighand(struct sighand_struct *sighand) 1425c81addc9SOleg Nesterov { 1426d80e731eSOleg Nesterov if (atomic_dec_and_test(&sighand->count)) { 1427d80e731eSOleg Nesterov signalfd_cleanup(sighand); 1428392809b2SOleg Nesterov /* 14295f0d5a3aSPaul E. McKenney * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it 1430392809b2SOleg Nesterov * without an RCU grace period, see __lock_task_sighand(). 1431392809b2SOleg Nesterov */ 1432c81addc9SOleg Nesterov kmem_cache_free(sighand_cachep, sighand); 1433c81addc9SOleg Nesterov } 1434d80e731eSOleg Nesterov } 1435c81addc9SOleg Nesterov 1436b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 1437f06febc9SFrank Mayhar /* 1438f06febc9SFrank Mayhar * Initialize POSIX timer handling for a thread group. 1439f06febc9SFrank Mayhar */ 1440f06febc9SFrank Mayhar static void posix_cpu_timers_init_group(struct signal_struct *sig) 1441f06febc9SFrank Mayhar { 144278d7d407SJiri Slaby unsigned long cpu_limit; 144378d7d407SJiri Slaby 1444316c1608SJason Low cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); 144578d7d407SJiri Slaby if (cpu_limit != RLIM_INFINITY) { 1446ebd7e7fcSFrederic Weisbecker sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC; 1447d5c373ebSJason Low sig->cputimer.running = true; 14486279a751SOleg Nesterov } 14496279a751SOleg Nesterov 1450f06febc9SFrank Mayhar /* The timer lists. */ 1451f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[0]); 1452f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[1]); 1453f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[2]); 1454f06febc9SFrank Mayhar } 1455b18b6a9cSNicolas Pitre #else 1456b18b6a9cSNicolas Pitre static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { } 1457b18b6a9cSNicolas Pitre #endif 1458f06febc9SFrank Mayhar 1459a39bc516SAlexey Dobriyan static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) 14601da177e4SLinus Torvalds { 14611da177e4SLinus Torvalds struct signal_struct *sig; 14621da177e4SLinus Torvalds 14634ab6c083SOleg Nesterov if (clone_flags & CLONE_THREAD) 1464490dea45SPeter Zijlstra return 0; 14656279a751SOleg Nesterov 1466a56704efSVeaceslav Falico sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); 14671da177e4SLinus Torvalds tsk->signal = sig; 14681da177e4SLinus Torvalds if (!sig) 14691da177e4SLinus Torvalds return -ENOMEM; 14701da177e4SLinus Torvalds 1471b3ac022cSOleg Nesterov sig->nr_threads = 1; 14721da177e4SLinus Torvalds atomic_set(&sig->live, 1); 1473b3ac022cSOleg Nesterov atomic_set(&sig->sigcnt, 1); 14740c740d0aSOleg Nesterov 14750c740d0aSOleg Nesterov /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ 14760c740d0aSOleg Nesterov sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); 14770c740d0aSOleg Nesterov tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); 14780c740d0aSOleg Nesterov 14791da177e4SLinus Torvalds init_waitqueue_head(&sig->wait_chldexit); 1480db51aeccSOleg Nesterov sig->curr_target = tsk; 14811da177e4SLinus Torvalds init_sigpending(&sig->shared_pending); 1482e78c3496SRik van Riel seqlock_init(&sig->stats_lock); 14839d7fb042SPeter Zijlstra prev_cputime_init(&sig->prev_cputime); 14841da177e4SLinus Torvalds 1485baa73d9eSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 1486b18b6a9cSNicolas Pitre INIT_LIST_HEAD(&sig->posix_timers); 1487c9cb2e3dSThomas Gleixner hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 14881da177e4SLinus Torvalds sig->real_timer.function = it_real_fn; 1489baa73d9eSNicolas Pitre #endif 14901da177e4SLinus Torvalds 14911da177e4SLinus Torvalds task_lock(current->group_leader); 14921da177e4SLinus Torvalds memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); 14931da177e4SLinus Torvalds task_unlock(current->group_leader); 14941da177e4SLinus Torvalds 14956279a751SOleg Nesterov posix_cpu_timers_init_group(sig); 14966279a751SOleg Nesterov 1497522ed776SMiloslav Trmac tty_audit_fork(sig); 14985091faa4SMike Galbraith sched_autogroup_fork(sig); 1499522ed776SMiloslav Trmac 1500a63d83f4SDavid Rientjes sig->oom_score_adj = current->signal->oom_score_adj; 1501dabb16f6SMandeep Singh Baines sig->oom_score_adj_min = current->signal->oom_score_adj_min; 150228b83c51SKOSAKI Motohiro 15039b1bf12dSKOSAKI Motohiro mutex_init(&sig->cred_guard_mutex); 15049b1bf12dSKOSAKI Motohiro 15051da177e4SLinus Torvalds return 0; 15061da177e4SLinus Torvalds } 15071da177e4SLinus Torvalds 1508dbd95212SKees Cook static void copy_seccomp(struct task_struct *p) 1509dbd95212SKees Cook { 1510dbd95212SKees Cook #ifdef CONFIG_SECCOMP 1511dbd95212SKees Cook /* 1512dbd95212SKees Cook * Must be called with sighand->lock held, which is common to 1513dbd95212SKees Cook * all threads in the group. Holding cred_guard_mutex is not 1514dbd95212SKees Cook * needed because this new task is not yet running and cannot 1515dbd95212SKees Cook * be racing exec. 1516dbd95212SKees Cook */ 151769f6a34bSGuenter Roeck assert_spin_locked(¤t->sighand->siglock); 1518dbd95212SKees Cook 1519dbd95212SKees Cook /* Ref-count the new filter user, and assign it. */ 1520dbd95212SKees Cook get_seccomp_filter(current); 1521dbd95212SKees Cook p->seccomp = current->seccomp; 1522dbd95212SKees Cook 1523dbd95212SKees Cook /* 1524dbd95212SKees Cook * Explicitly enable no_new_privs here in case it got set 1525dbd95212SKees Cook * between the task_struct being duplicated and holding the 1526dbd95212SKees Cook * sighand lock. The seccomp state and nnp must be in sync. 1527dbd95212SKees Cook */ 1528dbd95212SKees Cook if (task_no_new_privs(current)) 1529dbd95212SKees Cook task_set_no_new_privs(p); 1530dbd95212SKees Cook 1531dbd95212SKees Cook /* 1532dbd95212SKees Cook * If the parent gained a seccomp mode after copying thread 1533dbd95212SKees Cook * flags and between before we held the sighand lock, we have 1534dbd95212SKees Cook * to manually enable the seccomp thread flag here. 1535dbd95212SKees Cook */ 1536dbd95212SKees Cook if (p->seccomp.mode != SECCOMP_MODE_DISABLED) 1537dbd95212SKees Cook set_tsk_thread_flag(p, TIF_SECCOMP); 1538dbd95212SKees Cook #endif 1539dbd95212SKees Cook } 1540dbd95212SKees Cook 154117da2bd9SHeiko Carstens SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) 15421da177e4SLinus Torvalds { 15431da177e4SLinus Torvalds current->clear_child_tid = tidptr; 15441da177e4SLinus Torvalds 1545b488893aSPavel Emelyanov return task_pid_vnr(current); 15461da177e4SLinus Torvalds } 15471da177e4SLinus Torvalds 1548a39bc516SAlexey Dobriyan static void rt_mutex_init_task(struct task_struct *p) 154923f78d4aSIngo Molnar { 15501d615482SThomas Gleixner raw_spin_lock_init(&p->pi_lock); 1551e29e175bSZilvinas Valinskas #ifdef CONFIG_RT_MUTEXES 1552a23ba907SDavidlohr Bueso p->pi_waiters = RB_ROOT_CACHED; 1553e96a7705SXunlei Pang p->pi_top_task = NULL; 155423f78d4aSIngo Molnar p->pi_blocked_on = NULL; 155523f78d4aSIngo Molnar #endif 155623f78d4aSIngo Molnar } 155723f78d4aSIngo Molnar 1558b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 15591da177e4SLinus Torvalds /* 1560f06febc9SFrank Mayhar * Initialize POSIX timer handling for a single task. 1561f06febc9SFrank Mayhar */ 1562f06febc9SFrank Mayhar static void posix_cpu_timers_init(struct task_struct *tsk) 1563f06febc9SFrank Mayhar { 156464861634SMartin Schwidefsky tsk->cputime_expires.prof_exp = 0; 156564861634SMartin Schwidefsky tsk->cputime_expires.virt_exp = 0; 1566f06febc9SFrank Mayhar tsk->cputime_expires.sched_exp = 0; 1567f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[0]); 1568f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[1]); 1569f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[2]); 1570f06febc9SFrank Mayhar } 1571b18b6a9cSNicolas Pitre #else 1572b18b6a9cSNicolas Pitre static inline void posix_cpu_timers_init(struct task_struct *tsk) { } 1573b18b6a9cSNicolas Pitre #endif 1574f06febc9SFrank Mayhar 157581907739SOleg Nesterov static inline void 157681907739SOleg Nesterov init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) 157781907739SOleg Nesterov { 157881907739SOleg Nesterov task->pids[type].pid = pid; 157981907739SOleg Nesterov } 158081907739SOleg Nesterov 15816bfbaa51SIngo Molnar static inline void rcu_copy_process(struct task_struct *p) 15826bfbaa51SIngo Molnar { 15836bfbaa51SIngo Molnar #ifdef CONFIG_PREEMPT_RCU 15846bfbaa51SIngo Molnar p->rcu_read_lock_nesting = 0; 15856bfbaa51SIngo Molnar p->rcu_read_unlock_special.s = 0; 15866bfbaa51SIngo Molnar p->rcu_blocked_node = NULL; 15876bfbaa51SIngo Molnar INIT_LIST_HEAD(&p->rcu_node_entry); 15886bfbaa51SIngo Molnar #endif /* #ifdef CONFIG_PREEMPT_RCU */ 15896bfbaa51SIngo Molnar #ifdef CONFIG_TASKS_RCU 15906bfbaa51SIngo Molnar p->rcu_tasks_holdout = false; 15916bfbaa51SIngo Molnar INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); 15926bfbaa51SIngo Molnar p->rcu_tasks_idle_cpu = -1; 15936bfbaa51SIngo Molnar #endif /* #ifdef CONFIG_TASKS_RCU */ 15946bfbaa51SIngo Molnar } 15956bfbaa51SIngo Molnar 1596f06febc9SFrank Mayhar /* 15971da177e4SLinus Torvalds * This creates a new process as a copy of the old one, 15981da177e4SLinus Torvalds * but does not actually start it yet. 15991da177e4SLinus Torvalds * 16001da177e4SLinus Torvalds * It copies the registers, and all the appropriate 16011da177e4SLinus Torvalds * parts of the process environment (as per the clone 16021da177e4SLinus Torvalds * flags). The actual kick-off is left to the caller. 16031da177e4SLinus Torvalds */ 16040766f788SEmese Revfy static __latent_entropy struct task_struct *copy_process( 16050766f788SEmese Revfy unsigned long clone_flags, 16061da177e4SLinus Torvalds unsigned long stack_start, 16071da177e4SLinus Torvalds unsigned long stack_size, 16081da177e4SLinus Torvalds int __user *child_tidptr, 160909a05394SRoland McGrath struct pid *pid, 16103033f14aSJosh Triplett int trace, 1611725fc629SAndi Kleen unsigned long tls, 1612725fc629SAndi Kleen int node) 16131da177e4SLinus Torvalds { 16141da177e4SLinus Torvalds int retval; 1615a24efe62SMariusz Kozlowski struct task_struct *p; 16161da177e4SLinus Torvalds 1617667b6094SMarcos Paulo de Souza /* 1618667b6094SMarcos Paulo de Souza * Don't allow sharing the root directory with processes in a different 1619667b6094SMarcos Paulo de Souza * namespace 1620667b6094SMarcos Paulo de Souza */ 16211da177e4SLinus Torvalds if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) 16221da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 16231da177e4SLinus Torvalds 1624e66eded8SEric W. Biederman if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) 1625e66eded8SEric W. Biederman return ERR_PTR(-EINVAL); 1626e66eded8SEric W. Biederman 16271da177e4SLinus Torvalds /* 16281da177e4SLinus Torvalds * Thread groups must share signals as well, and detached threads 16291da177e4SLinus Torvalds * can only be started up within the thread group. 16301da177e4SLinus Torvalds */ 16311da177e4SLinus Torvalds if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) 16321da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 16331da177e4SLinus Torvalds 16341da177e4SLinus Torvalds /* 16351da177e4SLinus Torvalds * Shared signal handlers imply shared VM. By way of the above, 16361da177e4SLinus Torvalds * thread groups also imply shared VM. Blocking this case allows 16371da177e4SLinus Torvalds * for various simplifications in other code. 16381da177e4SLinus Torvalds */ 16391da177e4SLinus Torvalds if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) 16401da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 16411da177e4SLinus Torvalds 1642123be07bSSukadev Bhattiprolu /* 1643123be07bSSukadev Bhattiprolu * Siblings of global init remain as zombies on exit since they are 1644123be07bSSukadev Bhattiprolu * not reaped by their parent (swapper). To solve this and to avoid 1645123be07bSSukadev Bhattiprolu * multi-rooted process trees, prevent global and container-inits 1646123be07bSSukadev Bhattiprolu * from creating siblings. 1647123be07bSSukadev Bhattiprolu */ 1648123be07bSSukadev Bhattiprolu if ((clone_flags & CLONE_PARENT) && 1649123be07bSSukadev Bhattiprolu current->signal->flags & SIGNAL_UNKILLABLE) 1650123be07bSSukadev Bhattiprolu return ERR_PTR(-EINVAL); 1651123be07bSSukadev Bhattiprolu 16528382fcacSEric W. Biederman /* 165340a0d32dSOleg Nesterov * If the new process will be in a different pid or user namespace 1654faf00da5SEric W. Biederman * do not allow it to share a thread group with the forking task. 16558382fcacSEric W. Biederman */ 1656faf00da5SEric W. Biederman if (clone_flags & CLONE_THREAD) { 165740a0d32dSOleg Nesterov if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || 165840a0d32dSOleg Nesterov (task_active_pid_ns(current) != 1659c2b1df2eSAndy Lutomirski current->nsproxy->pid_ns_for_children)) 16608382fcacSEric W. Biederman return ERR_PTR(-EINVAL); 166140a0d32dSOleg Nesterov } 16628382fcacSEric W. Biederman 16631da177e4SLinus Torvalds retval = -ENOMEM; 1664725fc629SAndi Kleen p = dup_task_struct(current, node); 16651da177e4SLinus Torvalds if (!p) 16661da177e4SLinus Torvalds goto fork_out; 16671da177e4SLinus Torvalds 16684d6501dcSVegard Nossum /* 16694d6501dcSVegard Nossum * This _must_ happen before we call free_task(), i.e. before we jump 16704d6501dcSVegard Nossum * to any of the bad_fork_* labels. This is to avoid freeing 16714d6501dcSVegard Nossum * p->set_child_tid which is (ab)used as a kthread's data pointer for 16724d6501dcSVegard Nossum * kernel threads (PF_KTHREAD). 16734d6501dcSVegard Nossum */ 16744d6501dcSVegard Nossum p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; 16754d6501dcSVegard Nossum /* 16764d6501dcSVegard Nossum * Clear TID on mm_release()? 16774d6501dcSVegard Nossum */ 16784d6501dcSVegard Nossum p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; 16794d6501dcSVegard Nossum 1680f7e8b616SSteven Rostedt ftrace_graph_init_task(p); 1681f7e8b616SSteven Rostedt 1682bea493a0SPeter Zijlstra rt_mutex_init_task(p); 1683bea493a0SPeter Zijlstra 1684d12c1a37SIngo Molnar #ifdef CONFIG_PROVE_LOCKING 1685de30a2b3SIngo Molnar DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); 1686de30a2b3SIngo Molnar DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 1687de30a2b3SIngo Molnar #endif 16881da177e4SLinus Torvalds retval = -EAGAIN; 16893b11a1deSDavid Howells if (atomic_read(&p->real_cred->user->processes) >= 169078d7d407SJiri Slaby task_rlimit(p, RLIMIT_NPROC)) { 1691b57922b6SEric Paris if (p->real_cred->user != INIT_USER && 1692b57922b6SEric Paris !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) 16931da177e4SLinus Torvalds goto bad_fork_free; 16941da177e4SLinus Torvalds } 169572fa5997SVasiliy Kulikov current->flags &= ~PF_NPROC_EXCEEDED; 16961da177e4SLinus Torvalds 1697f1752eecSDavid Howells retval = copy_creds(p, clone_flags); 1698f1752eecSDavid Howells if (retval < 0) 1699f1752eecSDavid Howells goto bad_fork_free; 17001da177e4SLinus Torvalds 17011da177e4SLinus Torvalds /* 17021da177e4SLinus Torvalds * If multiple threads are within copy_process(), then this check 17031da177e4SLinus Torvalds * triggers too late. This doesn't hurt, the check is only there 17041da177e4SLinus Torvalds * to stop root fork bombs. 17051da177e4SLinus Torvalds */ 170604ec93feSLi Zefan retval = -EAGAIN; 17071da177e4SLinus Torvalds if (nr_threads >= max_threads) 17081da177e4SLinus Torvalds goto bad_fork_cleanup_count; 17091da177e4SLinus Torvalds 1710ca74e92bSShailabh Nagar delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 1711c1de45caSPeter Zijlstra p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE); 1712514ddb44SDavid Rientjes p->flags |= PF_FORKNOEXEC; 17131da177e4SLinus Torvalds INIT_LIST_HEAD(&p->children); 17141da177e4SLinus Torvalds INIT_LIST_HEAD(&p->sibling); 1715f41d911fSPaul E. McKenney rcu_copy_process(p); 17161da177e4SLinus Torvalds p->vfork_done = NULL; 17171da177e4SLinus Torvalds spin_lock_init(&p->alloc_lock); 17181da177e4SLinus Torvalds 17191da177e4SLinus Torvalds init_sigpending(&p->pending); 17201da177e4SLinus Torvalds 172164861634SMartin Schwidefsky p->utime = p->stime = p->gtime = 0; 172240565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 172364861634SMartin Schwidefsky p->utimescaled = p->stimescaled = 0; 172440565b5aSStanislaw Gruszka #endif 17259d7fb042SPeter Zijlstra prev_cputime_init(&p->prev_cputime); 17269d7fb042SPeter Zijlstra 17276a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1728bac5b6b6SFrederic Weisbecker seqcount_init(&p->vtime.seqcount); 1729bac5b6b6SFrederic Weisbecker p->vtime.starttime = 0; 1730bac5b6b6SFrederic Weisbecker p->vtime.state = VTIME_INACTIVE; 17316a61671bSFrederic Weisbecker #endif 17326a61671bSFrederic Weisbecker 1733a3a2e76cSKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING) 1734a3a2e76cSKAMEZAWA Hiroyuki memset(&p->rss_stat, 0, sizeof(p->rss_stat)); 1735a3a2e76cSKAMEZAWA Hiroyuki #endif 1736172ba844SBalbir Singh 17376976675dSArjan van de Ven p->default_timer_slack_ns = current->timer_slack_ns; 17386976675dSArjan van de Ven 17395995477aSAndrea Righi task_io_accounting_init(&p->ioac); 17401da177e4SLinus Torvalds acct_clear_integrals(p); 17411da177e4SLinus Torvalds 1742f06febc9SFrank Mayhar posix_cpu_timers_init(p); 17431da177e4SLinus Torvalds 1744ccbf62d8SThomas Gleixner p->start_time = ktime_get_ns(); 174557e0be04SThomas Gleixner p->real_start_time = ktime_get_boot_ns(); 17461da177e4SLinus Torvalds p->io_context = NULL; 1747c0b0ae8aSRichard Guy Briggs audit_set_context(p, NULL); 1748b4f48b63SPaul Menage cgroup_fork(p); 17491da177e4SLinus Torvalds #ifdef CONFIG_NUMA 1750846a16bfSLee Schermerhorn p->mempolicy = mpol_dup(p->mempolicy); 17511da177e4SLinus Torvalds if (IS_ERR(p->mempolicy)) { 17521da177e4SLinus Torvalds retval = PTR_ERR(p->mempolicy); 17531da177e4SLinus Torvalds p->mempolicy = NULL; 1754e8604cb4SLi Zefan goto bad_fork_cleanup_threadgroup_lock; 17551da177e4SLinus Torvalds } 17561da177e4SLinus Torvalds #endif 1757778d3b0fSMichal Hocko #ifdef CONFIG_CPUSETS 1758778d3b0fSMichal Hocko p->cpuset_mem_spread_rotor = NUMA_NO_NODE; 1759778d3b0fSMichal Hocko p->cpuset_slab_spread_rotor = NUMA_NO_NODE; 1760cc9a6c87SMel Gorman seqcount_init(&p->mems_allowed_seq); 1761778d3b0fSMichal Hocko #endif 1762de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS 1763de30a2b3SIngo Molnar p->irq_events = 0; 1764de30a2b3SIngo Molnar p->hardirqs_enabled = 0; 1765de30a2b3SIngo Molnar p->hardirq_enable_ip = 0; 1766de30a2b3SIngo Molnar p->hardirq_enable_event = 0; 1767de30a2b3SIngo Molnar p->hardirq_disable_ip = _THIS_IP_; 1768de30a2b3SIngo Molnar p->hardirq_disable_event = 0; 1769de30a2b3SIngo Molnar p->softirqs_enabled = 1; 1770de30a2b3SIngo Molnar p->softirq_enable_ip = _THIS_IP_; 1771de30a2b3SIngo Molnar p->softirq_enable_event = 0; 1772de30a2b3SIngo Molnar p->softirq_disable_ip = 0; 1773de30a2b3SIngo Molnar p->softirq_disable_event = 0; 1774de30a2b3SIngo Molnar p->hardirq_context = 0; 1775de30a2b3SIngo Molnar p->softirq_context = 0; 1776de30a2b3SIngo Molnar #endif 17778bcbde54SDavid Hildenbrand 17788bcbde54SDavid Hildenbrand p->pagefault_disabled = 0; 17798bcbde54SDavid Hildenbrand 1780fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP 1781fbb9ce95SIngo Molnar p->lockdep_depth = 0; /* no locks held yet */ 1782fbb9ce95SIngo Molnar p->curr_chain_key = 0; 1783fbb9ce95SIngo Molnar p->lockdep_recursion = 0; 1784b09be676SByungchul Park lockdep_init_task(p); 1785fbb9ce95SIngo Molnar #endif 17861da177e4SLinus Torvalds 1787408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES 1788408894eeSIngo Molnar p->blocked_on = NULL; /* not blocked yet */ 1789408894eeSIngo Molnar #endif 1790cafe5635SKent Overstreet #ifdef CONFIG_BCACHE 1791cafe5635SKent Overstreet p->sequential_io = 0; 1792cafe5635SKent Overstreet p->sequential_io_avg = 0; 1793cafe5635SKent Overstreet #endif 17940f481406SMarkus Metzger 17953c90e6e9SSrivatsa Vaddagiri /* Perform scheduler related setup. Assign this task to a CPU. */ 1796aab03e05SDario Faggioli retval = sched_fork(clone_flags, p); 1797aab03e05SDario Faggioli if (retval) 1798aab03e05SDario Faggioli goto bad_fork_cleanup_policy; 17996ab423e0SPeter Zijlstra 1800cdd6c482SIngo Molnar retval = perf_event_init_task(p); 18016ab423e0SPeter Zijlstra if (retval) 18026ab423e0SPeter Zijlstra goto bad_fork_cleanup_policy; 1803fb0a685cSDaniel Rebelo de Oliveira retval = audit_alloc(p); 1804fb0a685cSDaniel Rebelo de Oliveira if (retval) 18056c72e350SPeter Zijlstra goto bad_fork_cleanup_perf; 18061da177e4SLinus Torvalds /* copy all the process information */ 1807ab602f79SJack Miller shm_init_task(p); 1808e4e55b47STetsuo Handa retval = security_task_alloc(p, clone_flags); 1809fb0a685cSDaniel Rebelo de Oliveira if (retval) 18101da177e4SLinus Torvalds goto bad_fork_cleanup_audit; 1811e4e55b47STetsuo Handa retval = copy_semundo(clone_flags, p); 1812e4e55b47STetsuo Handa if (retval) 1813e4e55b47STetsuo Handa goto bad_fork_cleanup_security; 1814fb0a685cSDaniel Rebelo de Oliveira retval = copy_files(clone_flags, p); 1815fb0a685cSDaniel Rebelo de Oliveira if (retval) 18161da177e4SLinus Torvalds goto bad_fork_cleanup_semundo; 1817fb0a685cSDaniel Rebelo de Oliveira retval = copy_fs(clone_flags, p); 1818fb0a685cSDaniel Rebelo de Oliveira if (retval) 18191da177e4SLinus Torvalds goto bad_fork_cleanup_files; 1820fb0a685cSDaniel Rebelo de Oliveira retval = copy_sighand(clone_flags, p); 1821fb0a685cSDaniel Rebelo de Oliveira if (retval) 18221da177e4SLinus Torvalds goto bad_fork_cleanup_fs; 1823fb0a685cSDaniel Rebelo de Oliveira retval = copy_signal(clone_flags, p); 1824fb0a685cSDaniel Rebelo de Oliveira if (retval) 18251da177e4SLinus Torvalds goto bad_fork_cleanup_sighand; 1826fb0a685cSDaniel Rebelo de Oliveira retval = copy_mm(clone_flags, p); 1827fb0a685cSDaniel Rebelo de Oliveira if (retval) 18281da177e4SLinus Torvalds goto bad_fork_cleanup_signal; 1829fb0a685cSDaniel Rebelo de Oliveira retval = copy_namespaces(clone_flags, p); 1830fb0a685cSDaniel Rebelo de Oliveira if (retval) 1831d84f4f99SDavid Howells goto bad_fork_cleanup_mm; 1832fb0a685cSDaniel Rebelo de Oliveira retval = copy_io(clone_flags, p); 1833fb0a685cSDaniel Rebelo de Oliveira if (retval) 1834fd0928dfSJens Axboe goto bad_fork_cleanup_namespaces; 18353033f14aSJosh Triplett retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls); 18361da177e4SLinus Torvalds if (retval) 1837fd0928dfSJens Axboe goto bad_fork_cleanup_io; 18381da177e4SLinus Torvalds 1839425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) { 1840c2b1df2eSAndy Lutomirski pid = alloc_pid(p->nsproxy->pid_ns_for_children); 184135f71bc0SMichal Hocko if (IS_ERR(pid)) { 184235f71bc0SMichal Hocko retval = PTR_ERR(pid); 18430740aa5fSJiri Slaby goto bad_fork_cleanup_thread; 1844425fb2b4SPavel Emelyanov } 184535f71bc0SMichal Hocko } 1846425fb2b4SPavel Emelyanov 184773c10101SJens Axboe #ifdef CONFIG_BLOCK 184873c10101SJens Axboe p->plug = NULL; 184973c10101SJens Axboe #endif 185042b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX 18518f17d3a5SIngo Molnar p->robust_list = NULL; 18528f17d3a5SIngo Molnar #ifdef CONFIG_COMPAT 18538f17d3a5SIngo Molnar p->compat_robust_list = NULL; 18548f17d3a5SIngo Molnar #endif 1855c87e2837SIngo Molnar INIT_LIST_HEAD(&p->pi_state_list); 1856c87e2837SIngo Molnar p->pi_state_cache = NULL; 185742b2dd0aSAlexey Dobriyan #endif 18581da177e4SLinus Torvalds /* 1859f9a3879aSGOTO Masanori * sigaltstack should be cleared when sharing the same VM 1860f9a3879aSGOTO Masanori */ 1861f9a3879aSGOTO Masanori if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) 18622a742138SStas Sergeev sas_ss_reset(p); 1863f9a3879aSGOTO Masanori 1864f9a3879aSGOTO Masanori /* 18656580807dSOleg Nesterov * Syscall tracing and stepping should be turned off in the 18666580807dSOleg Nesterov * child regardless of CLONE_PTRACE. 18671da177e4SLinus Torvalds */ 18686580807dSOleg Nesterov user_disable_single_step(p); 18691da177e4SLinus Torvalds clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); 1870ed75e8d5SLaurent Vivier #ifdef TIF_SYSCALL_EMU 1871ed75e8d5SLaurent Vivier clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); 1872ed75e8d5SLaurent Vivier #endif 18739745512cSArjan van de Ven clear_all_latency_tracing(p); 18741da177e4SLinus Torvalds 18751da177e4SLinus Torvalds /* ok, now we should be set up.. */ 187618c830dfSOleg Nesterov p->pid = pid_nr(pid); 187718c830dfSOleg Nesterov if (clone_flags & CLONE_THREAD) { 18785f8aadd8SOleg Nesterov p->exit_signal = -1; 187918c830dfSOleg Nesterov p->group_leader = current->group_leader; 188018c830dfSOleg Nesterov p->tgid = current->tgid; 188118c830dfSOleg Nesterov } else { 188218c830dfSOleg Nesterov if (clone_flags & CLONE_PARENT) 18835f8aadd8SOleg Nesterov p->exit_signal = current->group_leader->exit_signal; 18845f8aadd8SOleg Nesterov else 18855f8aadd8SOleg Nesterov p->exit_signal = (clone_flags & CSIGNAL); 188618c830dfSOleg Nesterov p->group_leader = p; 188718c830dfSOleg Nesterov p->tgid = p->pid; 188818c830dfSOleg Nesterov } 18895f8aadd8SOleg Nesterov 18909d823e8fSWu Fengguang p->nr_dirtied = 0; 18919d823e8fSWu Fengguang p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); 189283712358SWu Fengguang p->dirty_paused_when = 0; 18939d823e8fSWu Fengguang 1894bb8cbbfeSOleg Nesterov p->pdeath_signal = 0; 189547e65328SOleg Nesterov INIT_LIST_HEAD(&p->thread_group); 1896158e1645SAl Viro p->task_works = NULL; 18971da177e4SLinus Torvalds 1898780de9ddSIngo Molnar cgroup_threadgroup_change_begin(current); 189918c830dfSOleg Nesterov /* 19007e47682eSAleksa Sarai * Ensure that the cgroup subsystem policies allow the new process to be 19017e47682eSAleksa Sarai * forked. It should be noted the the new process's css_set can be changed 19027e47682eSAleksa Sarai * between here and cgroup_post_fork() if an organisation operation is in 19037e47682eSAleksa Sarai * progress. 19047e47682eSAleksa Sarai */ 1905b53202e6SOleg Nesterov retval = cgroup_can_fork(p); 19067e47682eSAleksa Sarai if (retval) 19077e47682eSAleksa Sarai goto bad_fork_free_pid; 19087e47682eSAleksa Sarai 19097e47682eSAleksa Sarai /* 191018c830dfSOleg Nesterov * Make it visible to the rest of the system, but dont wake it up yet. 191118c830dfSOleg Nesterov * Need tasklist lock for parent etc handling! 191218c830dfSOleg Nesterov */ 19131da177e4SLinus Torvalds write_lock_irq(&tasklist_lock); 19141da177e4SLinus Torvalds 19151da177e4SLinus Torvalds /* CLONE_PARENT re-uses the old parent */ 19162d5516cbSOleg Nesterov if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { 19171da177e4SLinus Torvalds p->real_parent = current->real_parent; 19182d5516cbSOleg Nesterov p->parent_exec_id = current->parent_exec_id; 19192d5516cbSOleg Nesterov } else { 19201da177e4SLinus Torvalds p->real_parent = current; 19212d5516cbSOleg Nesterov p->parent_exec_id = current->self_exec_id; 19222d5516cbSOleg Nesterov } 19231da177e4SLinus Torvalds 1924d83a7cb3SJosh Poimboeuf klp_copy_process(p); 1925d83a7cb3SJosh Poimboeuf 19261da177e4SLinus Torvalds spin_lock(¤t->sighand->siglock); 19274a2c7a78SOleg Nesterov 19284a2c7a78SOleg Nesterov /* 1929dbd95212SKees Cook * Copy seccomp details explicitly here, in case they were changed 1930dbd95212SKees Cook * before holding sighand lock. 1931dbd95212SKees Cook */ 1932dbd95212SKees Cook copy_seccomp(p); 1933dbd95212SKees Cook 1934d7822b1eSMathieu Desnoyers rseq_fork(p, clone_flags); 1935d7822b1eSMathieu Desnoyers 1936dbd95212SKees Cook /* 19374a2c7a78SOleg Nesterov * Process group and session signals need to be delivered to just the 19384a2c7a78SOleg Nesterov * parent before the fork or both the parent and the child after the 19394a2c7a78SOleg Nesterov * fork. Restart if a signal comes in before we add the new process to 19404a2c7a78SOleg Nesterov * it's process group. 19414a2c7a78SOleg Nesterov * A fatal signal pending means that current will exit, so the new 19424a2c7a78SOleg Nesterov * thread can't slip out of an OOM kill (or normal SIGKILL). 19434a2c7a78SOleg Nesterov */ 19444a2c7a78SOleg Nesterov recalc_sigpending(); 19454a2c7a78SOleg Nesterov if (signal_pending(current)) { 19464a2c7a78SOleg Nesterov retval = -ERESTARTNOINTR; 19477e47682eSAleksa Sarai goto bad_fork_cancel_cgroup; 19484a2c7a78SOleg Nesterov } 1949e8cfbc24SGargi Sharma if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { 19503fd37226SKirill Tkhai retval = -ENOMEM; 19513fd37226SKirill Tkhai goto bad_fork_cancel_cgroup; 19523fd37226SKirill Tkhai } 19534a2c7a78SOleg Nesterov 195473b9ebfeSOleg Nesterov if (likely(p->pid)) { 19554b9d33e6STejun Heo ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); 19561da177e4SLinus Torvalds 195781907739SOleg Nesterov init_task_pid(p, PIDTYPE_PID, pid); 19581da177e4SLinus Torvalds if (thread_group_leader(p)) { 195981907739SOleg Nesterov init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); 196081907739SOleg Nesterov init_task_pid(p, PIDTYPE_SID, task_session(current)); 196181907739SOleg Nesterov 19621c4042c2SEric W. Biederman if (is_child_reaper(pid)) { 196317cf22c3SEric W. Biederman ns_of_pid(pid)->child_reaper = p; 19641c4042c2SEric W. Biederman p->signal->flags |= SIGNAL_UNKILLABLE; 19651c4042c2SEric W. Biederman } 19665cd17569SEric W. Biederman 1967fea9d175SOleg Nesterov p->signal->leader_pid = pid; 19689c9f4dedSAlan Cox p->signal->tty = tty_kref_get(current->signal->tty); 1969749860ceSPavel Tikhomirov /* 1970749860ceSPavel Tikhomirov * Inherit has_child_subreaper flag under the same 1971749860ceSPavel Tikhomirov * tasklist_lock with adding child to the process tree 1972749860ceSPavel Tikhomirov * for propagate_has_child_subreaper optimization. 1973749860ceSPavel Tikhomirov */ 1974749860ceSPavel Tikhomirov p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || 1975749860ceSPavel Tikhomirov p->real_parent->signal->is_child_subreaper; 19769cd80bbbSOleg Nesterov list_add_tail(&p->sibling, &p->real_parent->children); 19775e85d4abSEric W. Biederman list_add_tail_rcu(&p->tasks, &init_task.tasks); 197881907739SOleg Nesterov attach_pid(p, PIDTYPE_PGID); 197981907739SOleg Nesterov attach_pid(p, PIDTYPE_SID); 1980909ea964SChristoph Lameter __this_cpu_inc(process_counts); 198180628ca0SOleg Nesterov } else { 198280628ca0SOleg Nesterov current->signal->nr_threads++; 198380628ca0SOleg Nesterov atomic_inc(¤t->signal->live); 198480628ca0SOleg Nesterov atomic_inc(¤t->signal->sigcnt); 198580628ca0SOleg Nesterov list_add_tail_rcu(&p->thread_group, 198680628ca0SOleg Nesterov &p->group_leader->thread_group); 19870c740d0aSOleg Nesterov list_add_tail_rcu(&p->thread_node, 19880c740d0aSOleg Nesterov &p->signal->thread_head); 19891da177e4SLinus Torvalds } 199081907739SOleg Nesterov attach_pid(p, PIDTYPE_PID); 19911da177e4SLinus Torvalds nr_threads++; 199273b9ebfeSOleg Nesterov } 199373b9ebfeSOleg Nesterov 19941da177e4SLinus Torvalds total_forks++; 19953f17da69SOleg Nesterov spin_unlock(¤t->sighand->siglock); 19964af4206bSOleg Nesterov syscall_tracepoint_update(p); 19971da177e4SLinus Torvalds write_unlock_irq(&tasklist_lock); 19984af4206bSOleg Nesterov 1999c13cf856SAndrew Morton proc_fork_connector(p); 2000b53202e6SOleg Nesterov cgroup_post_fork(p); 2001780de9ddSIngo Molnar cgroup_threadgroup_change_end(current); 2002cdd6c482SIngo Molnar perf_event_fork(p); 200343d2b113SKAMEZAWA Hiroyuki 200443d2b113SKAMEZAWA Hiroyuki trace_task_newtask(p, clone_flags); 20053ab67966SOleg Nesterov uprobe_copy_process(p, clone_flags); 200643d2b113SKAMEZAWA Hiroyuki 20071da177e4SLinus Torvalds return p; 20081da177e4SLinus Torvalds 20097e47682eSAleksa Sarai bad_fork_cancel_cgroup: 20103fd37226SKirill Tkhai spin_unlock(¤t->sighand->siglock); 20113fd37226SKirill Tkhai write_unlock_irq(&tasklist_lock); 2012b53202e6SOleg Nesterov cgroup_cancel_fork(p); 2013425fb2b4SPavel Emelyanov bad_fork_free_pid: 2014780de9ddSIngo Molnar cgroup_threadgroup_change_end(current); 2015425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) 2016425fb2b4SPavel Emelyanov free_pid(pid); 20170740aa5fSJiri Slaby bad_fork_cleanup_thread: 20180740aa5fSJiri Slaby exit_thread(p); 2019fd0928dfSJens Axboe bad_fork_cleanup_io: 2020b69f2292SLouis Rilling if (p->io_context) 2021b69f2292SLouis Rilling exit_io_context(p); 2022ab516013SSerge E. Hallyn bad_fork_cleanup_namespaces: 2023444f378bSLinus Torvalds exit_task_namespaces(p); 20241da177e4SLinus Torvalds bad_fork_cleanup_mm: 2025c9f01245SDavid Rientjes if (p->mm) 20261da177e4SLinus Torvalds mmput(p->mm); 20271da177e4SLinus Torvalds bad_fork_cleanup_signal: 20284ab6c083SOleg Nesterov if (!(clone_flags & CLONE_THREAD)) 20291c5354deSMike Galbraith free_signal_struct(p->signal); 20301da177e4SLinus Torvalds bad_fork_cleanup_sighand: 2031a7e5328aSOleg Nesterov __cleanup_sighand(p->sighand); 20321da177e4SLinus Torvalds bad_fork_cleanup_fs: 20331da177e4SLinus Torvalds exit_fs(p); /* blocking */ 20341da177e4SLinus Torvalds bad_fork_cleanup_files: 20351da177e4SLinus Torvalds exit_files(p); /* blocking */ 20361da177e4SLinus Torvalds bad_fork_cleanup_semundo: 20371da177e4SLinus Torvalds exit_sem(p); 2038e4e55b47STetsuo Handa bad_fork_cleanup_security: 2039e4e55b47STetsuo Handa security_task_free(p); 20401da177e4SLinus Torvalds bad_fork_cleanup_audit: 20411da177e4SLinus Torvalds audit_free(p); 20426c72e350SPeter Zijlstra bad_fork_cleanup_perf: 2043cdd6c482SIngo Molnar perf_event_free_task(p); 20446c72e350SPeter Zijlstra bad_fork_cleanup_policy: 2045b09be676SByungchul Park lockdep_free_task(p); 20461da177e4SLinus Torvalds #ifdef CONFIG_NUMA 2047f0be3d32SLee Schermerhorn mpol_put(p->mempolicy); 2048e8604cb4SLi Zefan bad_fork_cleanup_threadgroup_lock: 20491da177e4SLinus Torvalds #endif 205035df17c5SShailabh Nagar delayacct_tsk_free(p); 20511da177e4SLinus Torvalds bad_fork_cleanup_count: 2052d84f4f99SDavid Howells atomic_dec(&p->cred->user->processes); 2053e0e81739SDavid Howells exit_creds(p); 20541da177e4SLinus Torvalds bad_fork_free: 2055405c0759SAndy Lutomirski p->state = TASK_DEAD; 205668f24b08SAndy Lutomirski put_task_stack(p); 20571da177e4SLinus Torvalds free_task(p); 2058fe7d37d1SOleg Nesterov fork_out: 2059fe7d37d1SOleg Nesterov return ERR_PTR(retval); 20601da177e4SLinus Torvalds } 20611da177e4SLinus Torvalds 2062f106eee1SOleg Nesterov static inline void init_idle_pids(struct pid_link *links) 2063f106eee1SOleg Nesterov { 2064f106eee1SOleg Nesterov enum pid_type type; 2065f106eee1SOleg Nesterov 2066f106eee1SOleg Nesterov for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { 2067f106eee1SOleg Nesterov INIT_HLIST_NODE(&links[type].node); /* not really needed */ 2068f106eee1SOleg Nesterov links[type].pid = &init_struct_pid; 2069f106eee1SOleg Nesterov } 2070f106eee1SOleg Nesterov } 2071f106eee1SOleg Nesterov 20720db0628dSPaul Gortmaker struct task_struct *fork_idle(int cpu) 20731da177e4SLinus Torvalds { 207436c8b586SIngo Molnar struct task_struct *task; 2075725fc629SAndi Kleen task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0, 2076725fc629SAndi Kleen cpu_to_node(cpu)); 2077f106eee1SOleg Nesterov if (!IS_ERR(task)) { 2078f106eee1SOleg Nesterov init_idle_pids(task->pids); 20791da177e4SLinus Torvalds init_idle(task, cpu); 2080f106eee1SOleg Nesterov } 208173b9ebfeSOleg Nesterov 20821da177e4SLinus Torvalds return task; 20831da177e4SLinus Torvalds } 20841da177e4SLinus Torvalds 20851da177e4SLinus Torvalds /* 20861da177e4SLinus Torvalds * Ok, this is the main fork-routine. 20871da177e4SLinus Torvalds * 20881da177e4SLinus Torvalds * It copies the process, and if successful kick-starts 20891da177e4SLinus Torvalds * it and waits for it to finish using the VM if required. 20901da177e4SLinus Torvalds */ 20913033f14aSJosh Triplett long _do_fork(unsigned long clone_flags, 20921da177e4SLinus Torvalds unsigned long stack_start, 20931da177e4SLinus Torvalds unsigned long stack_size, 20941da177e4SLinus Torvalds int __user *parent_tidptr, 20953033f14aSJosh Triplett int __user *child_tidptr, 20963033f14aSJosh Triplett unsigned long tls) 20971da177e4SLinus Torvalds { 20989f5325aaSMarcos Paulo de Souza struct completion vfork; 20999f5325aaSMarcos Paulo de Souza struct pid *pid; 21001da177e4SLinus Torvalds struct task_struct *p; 21011da177e4SLinus Torvalds int trace = 0; 210292476d7fSEric W. Biederman long nr; 21031da177e4SLinus Torvalds 2104bdff746aSAndrew Morton /* 21054b9d33e6STejun Heo * Determine whether and which event to report to ptracer. When 21064b9d33e6STejun Heo * called from kernel_thread or CLONE_UNTRACED is explicitly 21074b9d33e6STejun Heo * requested, no event is reported; otherwise, report if the event 21084b9d33e6STejun Heo * for the type of forking is enabled. 210909a05394SRoland McGrath */ 2110e80d6661SAl Viro if (!(clone_flags & CLONE_UNTRACED)) { 21114b9d33e6STejun Heo if (clone_flags & CLONE_VFORK) 21124b9d33e6STejun Heo trace = PTRACE_EVENT_VFORK; 21134b9d33e6STejun Heo else if ((clone_flags & CSIGNAL) != SIGCHLD) 21144b9d33e6STejun Heo trace = PTRACE_EVENT_CLONE; 21154b9d33e6STejun Heo else 21164b9d33e6STejun Heo trace = PTRACE_EVENT_FORK; 21174b9d33e6STejun Heo 21184b9d33e6STejun Heo if (likely(!ptrace_event_enabled(current, trace))) 21194b9d33e6STejun Heo trace = 0; 21204b9d33e6STejun Heo } 21211da177e4SLinus Torvalds 212262e791c1SAl Viro p = copy_process(clone_flags, stack_start, stack_size, 2123725fc629SAndi Kleen child_tidptr, NULL, trace, tls, NUMA_NO_NODE); 212438addce8SEmese Revfy add_latent_entropy(); 21259f5325aaSMarcos Paulo de Souza 21269f5325aaSMarcos Paulo de Souza if (IS_ERR(p)) 21279f5325aaSMarcos Paulo de Souza return PTR_ERR(p); 21289f5325aaSMarcos Paulo de Souza 21291da177e4SLinus Torvalds /* 21301da177e4SLinus Torvalds * Do this prior waking up the new thread - the thread pointer 21311da177e4SLinus Torvalds * might get invalid after that point, if the thread exits quickly. 21321da177e4SLinus Torvalds */ 21330a16b607SMathieu Desnoyers trace_sched_process_fork(current, p); 21340a16b607SMathieu Desnoyers 21354e52365fSMatthew Dempsky pid = get_task_pid(p, PIDTYPE_PID); 21364e52365fSMatthew Dempsky nr = pid_vnr(pid); 213730e49c26SPavel Emelyanov 213830e49c26SPavel Emelyanov if (clone_flags & CLONE_PARENT_SETTID) 213930e49c26SPavel Emelyanov put_user(nr, parent_tidptr); 2140a6f5e063SSukadev Bhattiprolu 21411da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) { 21421da177e4SLinus Torvalds p->vfork_done = &vfork; 21431da177e4SLinus Torvalds init_completion(&vfork); 2144d68b46feSOleg Nesterov get_task_struct(p); 21451da177e4SLinus Torvalds } 21461da177e4SLinus Torvalds 21473e51e3edSSamir Bellabes wake_up_new_task(p); 21481da177e4SLinus Torvalds 21494b9d33e6STejun Heo /* forking complete and child started to run, tell ptracer */ 21504b9d33e6STejun Heo if (unlikely(trace)) 21514e52365fSMatthew Dempsky ptrace_event_pid(trace, pid); 215209a05394SRoland McGrath 21531da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) { 2154d68b46feSOleg Nesterov if (!wait_for_vfork_done(p, &vfork)) 21554e52365fSMatthew Dempsky ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); 21569f59ce5dSChuck Ebbert } 21574e52365fSMatthew Dempsky 21584e52365fSMatthew Dempsky put_pid(pid); 215992476d7fSEric W. Biederman return nr; 21601da177e4SLinus Torvalds } 21611da177e4SLinus Torvalds 21623033f14aSJosh Triplett #ifndef CONFIG_HAVE_COPY_THREAD_TLS 21633033f14aSJosh Triplett /* For compatibility with architectures that call do_fork directly rather than 21643033f14aSJosh Triplett * using the syscall entry points below. */ 21653033f14aSJosh Triplett long do_fork(unsigned long clone_flags, 21663033f14aSJosh Triplett unsigned long stack_start, 21673033f14aSJosh Triplett unsigned long stack_size, 21683033f14aSJosh Triplett int __user *parent_tidptr, 21693033f14aSJosh Triplett int __user *child_tidptr) 21703033f14aSJosh Triplett { 21713033f14aSJosh Triplett return _do_fork(clone_flags, stack_start, stack_size, 21723033f14aSJosh Triplett parent_tidptr, child_tidptr, 0); 21733033f14aSJosh Triplett } 21743033f14aSJosh Triplett #endif 21753033f14aSJosh Triplett 21762aa3a7f8SAl Viro /* 21772aa3a7f8SAl Viro * Create a kernel thread. 21782aa3a7f8SAl Viro */ 21792aa3a7f8SAl Viro pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 21802aa3a7f8SAl Viro { 21813033f14aSJosh Triplett return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, 21823033f14aSJosh Triplett (unsigned long)arg, NULL, NULL, 0); 21832aa3a7f8SAl Viro } 21842aa3a7f8SAl Viro 2185d2125043SAl Viro #ifdef __ARCH_WANT_SYS_FORK 2186d2125043SAl Viro SYSCALL_DEFINE0(fork) 2187d2125043SAl Viro { 2188d2125043SAl Viro #ifdef CONFIG_MMU 21893033f14aSJosh Triplett return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0); 2190d2125043SAl Viro #else 2191d2125043SAl Viro /* can not support in nommu mode */ 21925d59e182SDaeseok Youn return -EINVAL; 2193d2125043SAl Viro #endif 2194d2125043SAl Viro } 2195d2125043SAl Viro #endif 2196d2125043SAl Viro 2197d2125043SAl Viro #ifdef __ARCH_WANT_SYS_VFORK 2198d2125043SAl Viro SYSCALL_DEFINE0(vfork) 2199d2125043SAl Viro { 22003033f14aSJosh Triplett return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 22013033f14aSJosh Triplett 0, NULL, NULL, 0); 2202d2125043SAl Viro } 2203d2125043SAl Viro #endif 2204d2125043SAl Viro 2205d2125043SAl Viro #ifdef __ARCH_WANT_SYS_CLONE 2206d2125043SAl Viro #ifdef CONFIG_CLONE_BACKWARDS 2207d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2208d2125043SAl Viro int __user *, parent_tidptr, 22093033f14aSJosh Triplett unsigned long, tls, 2210d2125043SAl Viro int __user *, child_tidptr) 2211d2125043SAl Viro #elif defined(CONFIG_CLONE_BACKWARDS2) 2212d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, 2213d2125043SAl Viro int __user *, parent_tidptr, 2214d2125043SAl Viro int __user *, child_tidptr, 22153033f14aSJosh Triplett unsigned long, tls) 2216dfa9771aSMichal Simek #elif defined(CONFIG_CLONE_BACKWARDS3) 2217dfa9771aSMichal Simek SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, 2218dfa9771aSMichal Simek int, stack_size, 2219dfa9771aSMichal Simek int __user *, parent_tidptr, 2220dfa9771aSMichal Simek int __user *, child_tidptr, 22213033f14aSJosh Triplett unsigned long, tls) 2222d2125043SAl Viro #else 2223d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2224d2125043SAl Viro int __user *, parent_tidptr, 2225d2125043SAl Viro int __user *, child_tidptr, 22263033f14aSJosh Triplett unsigned long, tls) 2227d2125043SAl Viro #endif 2228d2125043SAl Viro { 22293033f14aSJosh Triplett return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls); 2230d2125043SAl Viro } 2231d2125043SAl Viro #endif 2232d2125043SAl Viro 22330f1b92cbSOleg Nesterov void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) 22340f1b92cbSOleg Nesterov { 22350f1b92cbSOleg Nesterov struct task_struct *leader, *parent, *child; 22360f1b92cbSOleg Nesterov int res; 22370f1b92cbSOleg Nesterov 22380f1b92cbSOleg Nesterov read_lock(&tasklist_lock); 22390f1b92cbSOleg Nesterov leader = top = top->group_leader; 22400f1b92cbSOleg Nesterov down: 22410f1b92cbSOleg Nesterov for_each_thread(leader, parent) { 22420f1b92cbSOleg Nesterov list_for_each_entry(child, &parent->children, sibling) { 22430f1b92cbSOleg Nesterov res = visitor(child, data); 22440f1b92cbSOleg Nesterov if (res) { 22450f1b92cbSOleg Nesterov if (res < 0) 22460f1b92cbSOleg Nesterov goto out; 22470f1b92cbSOleg Nesterov leader = child; 22480f1b92cbSOleg Nesterov goto down; 22490f1b92cbSOleg Nesterov } 22500f1b92cbSOleg Nesterov up: 22510f1b92cbSOleg Nesterov ; 22520f1b92cbSOleg Nesterov } 22530f1b92cbSOleg Nesterov } 22540f1b92cbSOleg Nesterov 22550f1b92cbSOleg Nesterov if (leader != top) { 22560f1b92cbSOleg Nesterov child = leader; 22570f1b92cbSOleg Nesterov parent = child->real_parent; 22580f1b92cbSOleg Nesterov leader = parent->group_leader; 22590f1b92cbSOleg Nesterov goto up; 22600f1b92cbSOleg Nesterov } 22610f1b92cbSOleg Nesterov out: 22620f1b92cbSOleg Nesterov read_unlock(&tasklist_lock); 22630f1b92cbSOleg Nesterov } 22640f1b92cbSOleg Nesterov 22655fd63b30SRavikiran G Thirumalai #ifndef ARCH_MIN_MMSTRUCT_ALIGN 22665fd63b30SRavikiran G Thirumalai #define ARCH_MIN_MMSTRUCT_ALIGN 0 22675fd63b30SRavikiran G Thirumalai #endif 22685fd63b30SRavikiran G Thirumalai 226951cc5068SAlexey Dobriyan static void sighand_ctor(void *data) 2270aa1757f9SOleg Nesterov { 2271aa1757f9SOleg Nesterov struct sighand_struct *sighand = data; 2272aa1757f9SOleg Nesterov 2273aa1757f9SOleg Nesterov spin_lock_init(&sighand->siglock); 2274b8fceee1SDavide Libenzi init_waitqueue_head(&sighand->signalfd_wqh); 2275fba2afaaSDavide Libenzi } 2276aa1757f9SOleg Nesterov 22771da177e4SLinus Torvalds void __init proc_caches_init(void) 22781da177e4SLinus Torvalds { 22791da177e4SLinus Torvalds sighand_cachep = kmem_cache_create("sighand_cache", 22801da177e4SLinus Torvalds sizeof(struct sighand_struct), 0, 22815f0d5a3aSPaul E. McKenney SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| 228275f296d9SLevin, Alexander (Sasha Levin) SLAB_ACCOUNT, sighand_ctor); 22831da177e4SLinus Torvalds signal_cachep = kmem_cache_create("signal_cache", 22841da177e4SLinus Torvalds sizeof(struct signal_struct), 0, 228575f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 22865d097056SVladimir Davydov NULL); 22871da177e4SLinus Torvalds files_cachep = kmem_cache_create("files_cache", 22881da177e4SLinus Torvalds sizeof(struct files_struct), 0, 228975f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 22905d097056SVladimir Davydov NULL); 22911da177e4SLinus Torvalds fs_cachep = kmem_cache_create("fs_cache", 22921da177e4SLinus Torvalds sizeof(struct fs_struct), 0, 229375f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 22945d097056SVladimir Davydov NULL); 22956345d24dSLinus Torvalds /* 22966345d24dSLinus Torvalds * FIXME! The "sizeof(struct mm_struct)" currently includes the 22976345d24dSLinus Torvalds * whole struct cpumask for the OFFSTACK case. We could change 22986345d24dSLinus Torvalds * this to *only* allocate as much of it as required by the 22996345d24dSLinus Torvalds * maximum number of CPU's we can ever have. The cpumask_allocation 23006345d24dSLinus Torvalds * is at the end of the structure, exactly for that reason. 23016345d24dSLinus Torvalds */ 230207dcd7feSDavid Windsor mm_cachep = kmem_cache_create_usercopy("mm_struct", 23035fd63b30SRavikiran G Thirumalai sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, 230475f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 230507dcd7feSDavid Windsor offsetof(struct mm_struct, saved_auxv), 230607dcd7feSDavid Windsor sizeof_field(struct mm_struct, saved_auxv), 23075d097056SVladimir Davydov NULL); 23085d097056SVladimir Davydov vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); 23098feae131SDavid Howells mmap_init(); 231066577193SAl Viro nsproxy_cache_init(); 23111da177e4SLinus Torvalds } 2312cf2e340fSJANAK DESAI 2313cf2e340fSJANAK DESAI /* 23149bfb23fcSOleg Nesterov * Check constraints on flags passed to the unshare system call. 2315cf2e340fSJANAK DESAI */ 23169bfb23fcSOleg Nesterov static int check_unshare_flags(unsigned long unshare_flags) 2317cf2e340fSJANAK DESAI { 23189bfb23fcSOleg Nesterov if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| 23199bfb23fcSOleg Nesterov CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| 232050804fe3SEric W. Biederman CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| 2321a79a908fSAditya Kali CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP)) 2322cf2e340fSJANAK DESAI return -EINVAL; 23239bfb23fcSOleg Nesterov /* 232412c641abSEric W. Biederman * Not implemented, but pretend it works if there is nothing 232512c641abSEric W. Biederman * to unshare. Note that unsharing the address space or the 232612c641abSEric W. Biederman * signal handlers also need to unshare the signal queues (aka 232712c641abSEric W. Biederman * CLONE_THREAD). 23289bfb23fcSOleg Nesterov */ 23299bfb23fcSOleg Nesterov if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { 233012c641abSEric W. Biederman if (!thread_group_empty(current)) 233112c641abSEric W. Biederman return -EINVAL; 233212c641abSEric W. Biederman } 233312c641abSEric W. Biederman if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { 233412c641abSEric W. Biederman if (atomic_read(¤t->sighand->count) > 1) 233512c641abSEric W. Biederman return -EINVAL; 233612c641abSEric W. Biederman } 233712c641abSEric W. Biederman if (unshare_flags & CLONE_VM) { 233812c641abSEric W. Biederman if (!current_is_single_threaded()) 23399bfb23fcSOleg Nesterov return -EINVAL; 23409bfb23fcSOleg Nesterov } 2341cf2e340fSJANAK DESAI 2342cf2e340fSJANAK DESAI return 0; 2343cf2e340fSJANAK DESAI } 2344cf2e340fSJANAK DESAI 2345cf2e340fSJANAK DESAI /* 234699d1419dSJANAK DESAI * Unshare the filesystem structure if it is being shared 2347cf2e340fSJANAK DESAI */ 2348cf2e340fSJANAK DESAI static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) 2349cf2e340fSJANAK DESAI { 2350cf2e340fSJANAK DESAI struct fs_struct *fs = current->fs; 2351cf2e340fSJANAK DESAI 2352498052bbSAl Viro if (!(unshare_flags & CLONE_FS) || !fs) 2353498052bbSAl Viro return 0; 2354498052bbSAl Viro 2355498052bbSAl Viro /* don't need lock here; in the worst case we'll do useless copy */ 2356498052bbSAl Viro if (fs->users == 1) 2357498052bbSAl Viro return 0; 2358498052bbSAl Viro 2359498052bbSAl Viro *new_fsp = copy_fs_struct(fs); 236099d1419dSJANAK DESAI if (!*new_fsp) 236199d1419dSJANAK DESAI return -ENOMEM; 2362cf2e340fSJANAK DESAI 2363cf2e340fSJANAK DESAI return 0; 2364cf2e340fSJANAK DESAI } 2365cf2e340fSJANAK DESAI 2366cf2e340fSJANAK DESAI /* 2367a016f338SJANAK DESAI * Unshare file descriptor table if it is being shared 2368cf2e340fSJANAK DESAI */ 2369cf2e340fSJANAK DESAI static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) 2370cf2e340fSJANAK DESAI { 2371cf2e340fSJANAK DESAI struct files_struct *fd = current->files; 2372a016f338SJANAK DESAI int error = 0; 2373cf2e340fSJANAK DESAI 2374cf2e340fSJANAK DESAI if ((unshare_flags & CLONE_FILES) && 2375a016f338SJANAK DESAI (fd && atomic_read(&fd->count) > 1)) { 2376a016f338SJANAK DESAI *new_fdp = dup_fd(fd, &error); 2377a016f338SJANAK DESAI if (!*new_fdp) 2378a016f338SJANAK DESAI return error; 2379a016f338SJANAK DESAI } 2380cf2e340fSJANAK DESAI 2381cf2e340fSJANAK DESAI return 0; 2382cf2e340fSJANAK DESAI } 2383cf2e340fSJANAK DESAI 2384cf2e340fSJANAK DESAI /* 2385cf2e340fSJANAK DESAI * unshare allows a process to 'unshare' part of the process 2386cf2e340fSJANAK DESAI * context which was originally shared using clone. copy_* 2387cf2e340fSJANAK DESAI * functions used by do_fork() cannot be used here directly 2388cf2e340fSJANAK DESAI * because they modify an inactive task_struct that is being 2389cf2e340fSJANAK DESAI * constructed. Here we are modifying the current, active, 2390cf2e340fSJANAK DESAI * task_struct. 2391cf2e340fSJANAK DESAI */ 23929b32105eSDominik Brodowski int ksys_unshare(unsigned long unshare_flags) 2393cf2e340fSJANAK DESAI { 2394cf2e340fSJANAK DESAI struct fs_struct *fs, *new_fs = NULL; 2395cf2e340fSJANAK DESAI struct files_struct *fd, *new_fd = NULL; 2396b2e0d987SEric W. Biederman struct cred *new_cred = NULL; 2397cf7b708cSPavel Emelyanov struct nsproxy *new_nsproxy = NULL; 23989edff4abSManfred Spraul int do_sysvsem = 0; 23999bfb23fcSOleg Nesterov int err; 2400cf2e340fSJANAK DESAI 240150804fe3SEric W. Biederman /* 2402faf00da5SEric W. Biederman * If unsharing a user namespace must also unshare the thread group 2403faf00da5SEric W. Biederman * and unshare the filesystem root and working directories. 2404b2e0d987SEric W. Biederman */ 2405b2e0d987SEric W. Biederman if (unshare_flags & CLONE_NEWUSER) 2406e66eded8SEric W. Biederman unshare_flags |= CLONE_THREAD | CLONE_FS; 2407b2e0d987SEric W. Biederman /* 240850804fe3SEric W. Biederman * If unsharing vm, must also unshare signal handlers. 240950804fe3SEric W. Biederman */ 241050804fe3SEric W. Biederman if (unshare_flags & CLONE_VM) 241150804fe3SEric W. Biederman unshare_flags |= CLONE_SIGHAND; 24126013f67fSManfred Spraul /* 241312c641abSEric W. Biederman * If unsharing a signal handlers, must also unshare the signal queues. 241412c641abSEric W. Biederman */ 241512c641abSEric W. Biederman if (unshare_flags & CLONE_SIGHAND) 241612c641abSEric W. Biederman unshare_flags |= CLONE_THREAD; 241712c641abSEric W. Biederman /* 24189bfb23fcSOleg Nesterov * If unsharing namespace, must also unshare filesystem information. 24199bfb23fcSOleg Nesterov */ 24209bfb23fcSOleg Nesterov if (unshare_flags & CLONE_NEWNS) 24219bfb23fcSOleg Nesterov unshare_flags |= CLONE_FS; 242250804fe3SEric W. Biederman 242350804fe3SEric W. Biederman err = check_unshare_flags(unshare_flags); 242450804fe3SEric W. Biederman if (err) 242550804fe3SEric W. Biederman goto bad_unshare_out; 24269bfb23fcSOleg Nesterov /* 24276013f67fSManfred Spraul * CLONE_NEWIPC must also detach from the undolist: after switching 24286013f67fSManfred Spraul * to a new ipc namespace, the semaphore arrays from the old 24296013f67fSManfred Spraul * namespace are unreachable. 24306013f67fSManfred Spraul */ 24316013f67fSManfred Spraul if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) 24329edff4abSManfred Spraul do_sysvsem = 1; 2433fb0a685cSDaniel Rebelo de Oliveira err = unshare_fs(unshare_flags, &new_fs); 2434fb0a685cSDaniel Rebelo de Oliveira if (err) 24359bfb23fcSOleg Nesterov goto bad_unshare_out; 2436fb0a685cSDaniel Rebelo de Oliveira err = unshare_fd(unshare_flags, &new_fd); 2437fb0a685cSDaniel Rebelo de Oliveira if (err) 24389bfb23fcSOleg Nesterov goto bad_unshare_cleanup_fs; 2439b2e0d987SEric W. Biederman err = unshare_userns(unshare_flags, &new_cred); 2440fb0a685cSDaniel Rebelo de Oliveira if (err) 24419edff4abSManfred Spraul goto bad_unshare_cleanup_fd; 2442b2e0d987SEric W. Biederman err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, 2443b2e0d987SEric W. Biederman new_cred, new_fs); 2444b2e0d987SEric W. Biederman if (err) 2445b2e0d987SEric W. Biederman goto bad_unshare_cleanup_cred; 2446cf2e340fSJANAK DESAI 2447b2e0d987SEric W. Biederman if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { 24489edff4abSManfred Spraul if (do_sysvsem) { 24499edff4abSManfred Spraul /* 24509edff4abSManfred Spraul * CLONE_SYSVSEM is equivalent to sys_exit(). 24519edff4abSManfred Spraul */ 24529edff4abSManfred Spraul exit_sem(current); 24539edff4abSManfred Spraul } 2454ab602f79SJack Miller if (unshare_flags & CLONE_NEWIPC) { 2455ab602f79SJack Miller /* Orphan segments in old ns (see sem above). */ 2456ab602f79SJack Miller exit_shm(current); 2457ab602f79SJack Miller shm_init_task(current); 2458ab602f79SJack Miller } 2459ab516013SSerge E. Hallyn 24606f977e6bSAlan Cox if (new_nsproxy) 2461cf7b708cSPavel Emelyanov switch_task_namespaces(current, new_nsproxy); 2462cf2e340fSJANAK DESAI 2463cf7b708cSPavel Emelyanov task_lock(current); 2464cf7b708cSPavel Emelyanov 2465cf2e340fSJANAK DESAI if (new_fs) { 2466cf2e340fSJANAK DESAI fs = current->fs; 24672a4419b5SNick Piggin spin_lock(&fs->lock); 2468cf2e340fSJANAK DESAI current->fs = new_fs; 2469498052bbSAl Viro if (--fs->users) 2470498052bbSAl Viro new_fs = NULL; 2471498052bbSAl Viro else 2472cf2e340fSJANAK DESAI new_fs = fs; 24732a4419b5SNick Piggin spin_unlock(&fs->lock); 2474cf2e340fSJANAK DESAI } 2475cf2e340fSJANAK DESAI 2476cf2e340fSJANAK DESAI if (new_fd) { 2477cf2e340fSJANAK DESAI fd = current->files; 2478cf2e340fSJANAK DESAI current->files = new_fd; 2479cf2e340fSJANAK DESAI new_fd = fd; 2480cf2e340fSJANAK DESAI } 2481cf2e340fSJANAK DESAI 2482cf2e340fSJANAK DESAI task_unlock(current); 2483b2e0d987SEric W. Biederman 2484b2e0d987SEric W. Biederman if (new_cred) { 2485b2e0d987SEric W. Biederman /* Install the new user namespace */ 2486b2e0d987SEric W. Biederman commit_creds(new_cred); 2487b2e0d987SEric W. Biederman new_cred = NULL; 2488b2e0d987SEric W. Biederman } 2489cf2e340fSJANAK DESAI } 2490cf2e340fSJANAK DESAI 2491e4222673SHari Bathini perf_event_namespaces(current); 2492e4222673SHari Bathini 2493b2e0d987SEric W. Biederman bad_unshare_cleanup_cred: 2494b2e0d987SEric W. Biederman if (new_cred) 2495b2e0d987SEric W. Biederman put_cred(new_cred); 2496cf2e340fSJANAK DESAI bad_unshare_cleanup_fd: 2497cf2e340fSJANAK DESAI if (new_fd) 2498cf2e340fSJANAK DESAI put_files_struct(new_fd); 2499cf2e340fSJANAK DESAI 2500cf2e340fSJANAK DESAI bad_unshare_cleanup_fs: 2501cf2e340fSJANAK DESAI if (new_fs) 2502498052bbSAl Viro free_fs_struct(new_fs); 2503cf2e340fSJANAK DESAI 2504cf2e340fSJANAK DESAI bad_unshare_out: 2505cf2e340fSJANAK DESAI return err; 2506cf2e340fSJANAK DESAI } 25073b125388SAl Viro 25089b32105eSDominik Brodowski SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) 25099b32105eSDominik Brodowski { 25109b32105eSDominik Brodowski return ksys_unshare(unshare_flags); 25119b32105eSDominik Brodowski } 25129b32105eSDominik Brodowski 25133b125388SAl Viro /* 25143b125388SAl Viro * Helper to unshare the files of the current task. 25153b125388SAl Viro * We don't want to expose copy_files internals to 25163b125388SAl Viro * the exec layer of the kernel. 25173b125388SAl Viro */ 25183b125388SAl Viro 25193b125388SAl Viro int unshare_files(struct files_struct **displaced) 25203b125388SAl Viro { 25213b125388SAl Viro struct task_struct *task = current; 252250704516SAl Viro struct files_struct *copy = NULL; 25233b125388SAl Viro int error; 25243b125388SAl Viro 25253b125388SAl Viro error = unshare_fd(CLONE_FILES, ©); 25263b125388SAl Viro if (error || !copy) { 25273b125388SAl Viro *displaced = NULL; 25283b125388SAl Viro return error; 25293b125388SAl Viro } 25303b125388SAl Viro *displaced = task->files; 25313b125388SAl Viro task_lock(task); 25323b125388SAl Viro task->files = copy; 25333b125388SAl Viro task_unlock(task); 25343b125388SAl Viro return 0; 25353b125388SAl Viro } 253616db3d3fSHeinrich Schuchardt 253716db3d3fSHeinrich Schuchardt int sysctl_max_threads(struct ctl_table *table, int write, 253816db3d3fSHeinrich Schuchardt void __user *buffer, size_t *lenp, loff_t *ppos) 253916db3d3fSHeinrich Schuchardt { 254016db3d3fSHeinrich Schuchardt struct ctl_table t; 254116db3d3fSHeinrich Schuchardt int ret; 254216db3d3fSHeinrich Schuchardt int threads = max_threads; 254316db3d3fSHeinrich Schuchardt int min = MIN_THREADS; 254416db3d3fSHeinrich Schuchardt int max = MAX_THREADS; 254516db3d3fSHeinrich Schuchardt 254616db3d3fSHeinrich Schuchardt t = *table; 254716db3d3fSHeinrich Schuchardt t.data = &threads; 254816db3d3fSHeinrich Schuchardt t.extra1 = &min; 254916db3d3fSHeinrich Schuchardt t.extra2 = &max; 255016db3d3fSHeinrich Schuchardt 255116db3d3fSHeinrich Schuchardt ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 255216db3d3fSHeinrich Schuchardt if (ret || !write) 255316db3d3fSHeinrich Schuchardt return ret; 255416db3d3fSHeinrich Schuchardt 255516db3d3fSHeinrich Schuchardt set_max_threads(threads); 255616db3d3fSHeinrich Schuchardt 255716db3d3fSHeinrich Schuchardt return 0; 255816db3d3fSHeinrich Schuchardt } 2559