11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/kernel/fork.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 1991, 1992 Linus Torvalds 51da177e4SLinus Torvalds */ 61da177e4SLinus Torvalds 71da177e4SLinus Torvalds /* 81da177e4SLinus Torvalds * 'fork.c' contains the help-routines for the 'fork' system call 91da177e4SLinus Torvalds * (see also entry.S and others). 101da177e4SLinus Torvalds * Fork is rather simple, once you get the hang of it, but the memory 111da177e4SLinus Torvalds * management can be a bitch. See 'mm/memory.c': 'copy_page_range()' 121da177e4SLinus Torvalds */ 131da177e4SLinus Torvalds 141da177e4SLinus Torvalds #include <linux/slab.h> 154eb5aaa3SIngo Molnar #include <linux/sched/autogroup.h> 166e84f315SIngo Molnar #include <linux/sched/mm.h> 17f7ccbae4SIngo Molnar #include <linux/sched/coredump.h> 188703e8a4SIngo Molnar #include <linux/sched/user.h> 196a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h> 2003441a34SIngo Molnar #include <linux/sched/stat.h> 2129930025SIngo Molnar #include <linux/sched/task.h> 2268db0cf1SIngo Molnar #include <linux/sched/task_stack.h> 2332ef5517SIngo Molnar #include <linux/sched/cputime.h> 24037741a6SIngo Molnar #include <linux/rtmutex.h> 251da177e4SLinus Torvalds #include <linux/init.h> 261da177e4SLinus Torvalds #include <linux/unistd.h> 271da177e4SLinus Torvalds #include <linux/module.h> 281da177e4SLinus Torvalds #include <linux/vmalloc.h> 291da177e4SLinus Torvalds #include <linux/completion.h> 301da177e4SLinus Torvalds #include <linux/personality.h> 311da177e4SLinus Torvalds #include <linux/mempolicy.h> 321da177e4SLinus Torvalds #include <linux/sem.h> 331da177e4SLinus Torvalds #include <linux/file.h> 349f3acc31SAl Viro #include <linux/fdtable.h> 35da9cbc87SJens Axboe #include <linux/iocontext.h> 361da177e4SLinus Torvalds #include <linux/key.h> 371da177e4SLinus Torvalds #include <linux/binfmts.h> 381da177e4SLinus Torvalds #include <linux/mman.h> 39cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h> 40133ff0eaSJérôme Glisse #include <linux/hmm.h> 411da177e4SLinus Torvalds #include <linux/fs.h> 42615d6e87SDavidlohr Bueso #include <linux/mm.h> 43615d6e87SDavidlohr Bueso #include <linux/vmacache.h> 44ab516013SSerge E. Hallyn #include <linux/nsproxy.h> 45c59ede7bSRandy.Dunlap #include <linux/capability.h> 461da177e4SLinus Torvalds #include <linux/cpu.h> 47b4f48b63SPaul Menage #include <linux/cgroup.h> 481da177e4SLinus Torvalds #include <linux/security.h> 49a1e78772SMel Gorman #include <linux/hugetlb.h> 50e2cfabdfSWill Drewry #include <linux/seccomp.h> 511da177e4SLinus Torvalds #include <linux/swap.h> 521da177e4SLinus Torvalds #include <linux/syscalls.h> 531da177e4SLinus Torvalds #include <linux/jiffies.h> 541da177e4SLinus Torvalds #include <linux/futex.h> 558141c7f3SLinus Torvalds #include <linux/compat.h> 56207205a2SEric Dumazet #include <linux/kthread.h> 577c3ab738SAndrew Morton #include <linux/task_io_accounting_ops.h> 58ab2af1f5SDipankar Sarma #include <linux/rcupdate.h> 591da177e4SLinus Torvalds #include <linux/ptrace.h> 601da177e4SLinus Torvalds #include <linux/mount.h> 611da177e4SLinus Torvalds #include <linux/audit.h> 6278fb7466SPavel Emelianov #include <linux/memcontrol.h> 63f201ae23SFrederic Weisbecker #include <linux/ftrace.h> 645e2bf014SMike Galbraith #include <linux/proc_fs.h> 651da177e4SLinus Torvalds #include <linux/profile.h> 661da177e4SLinus Torvalds #include <linux/rmap.h> 67f8af4da3SHugh Dickins #include <linux/ksm.h> 681da177e4SLinus Torvalds #include <linux/acct.h> 69893e26e6SPavel Emelyanov #include <linux/userfaultfd_k.h> 708f0ab514SJay Lan #include <linux/tsacct_kern.h> 719f46080cSMatt Helsley #include <linux/cn_proc.h> 72ba96a0c8SRafael J. Wysocki #include <linux/freezer.h> 73ca74e92bSShailabh Nagar #include <linux/delayacct.h> 74ad4ecbcbSShailabh Nagar #include <linux/taskstats_kern.h> 750a425405SArjan van de Ven #include <linux/random.h> 76522ed776SMiloslav Trmac #include <linux/tty.h> 77fd0928dfSJens Axboe #include <linux/blkdev.h> 785ad4e53bSAl Viro #include <linux/fs_struct.h> 797c9f8861SEric Sandeen #include <linux/magic.h> 80d70f2a14SAndrew Morton #include <linux/sched/mm.h> 81cdd6c482SIngo Molnar #include <linux/perf_event.h> 8242c4ab41SStanislaw Gruszka #include <linux/posix-timers.h> 838e7cac79SAvi Kivity #include <linux/user-return-notifier.h> 843d5992d2SYing Han #include <linux/oom.h> 85ba76149fSAndrea Arcangeli #include <linux/khugepaged.h> 86d80e731eSOleg Nesterov #include <linux/signalfd.h> 870326f5a9SSrikar Dronamraju #include <linux/uprobes.h> 88a27bb332SKent Overstreet #include <linux/aio.h> 8952f5684cSGideon Israel Dsouza #include <linux/compiler.h> 9016db3d3fSHeinrich Schuchardt #include <linux/sysctl.h> 915c9a8750SDmitry Vyukov #include <linux/kcov.h> 92d83a7cb3SJosh Poimboeuf #include <linux/livepatch.h> 9348ac3c18SMark Rutland #include <linux/thread_info.h> 94afaef01cSAlexander Popov #include <linux/stackleak.h> 951da177e4SLinus Torvalds 961da177e4SLinus Torvalds #include <asm/pgtable.h> 971da177e4SLinus Torvalds #include <asm/pgalloc.h> 987c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 991da177e4SLinus Torvalds #include <asm/mmu_context.h> 1001da177e4SLinus Torvalds #include <asm/cacheflush.h> 1011da177e4SLinus Torvalds #include <asm/tlbflush.h> 1021da177e4SLinus Torvalds 103ad8d75ffSSteven Rostedt #include <trace/events/sched.h> 104ad8d75ffSSteven Rostedt 10543d2b113SKAMEZAWA Hiroyuki #define CREATE_TRACE_POINTS 10643d2b113SKAMEZAWA Hiroyuki #include <trace/events/task.h> 10743d2b113SKAMEZAWA Hiroyuki 1081da177e4SLinus Torvalds /* 109ac1b398dSHeinrich Schuchardt * Minimum number of threads to boot the kernel 110ac1b398dSHeinrich Schuchardt */ 111ac1b398dSHeinrich Schuchardt #define MIN_THREADS 20 112ac1b398dSHeinrich Schuchardt 113ac1b398dSHeinrich Schuchardt /* 114ac1b398dSHeinrich Schuchardt * Maximum number of threads 115ac1b398dSHeinrich Schuchardt */ 116ac1b398dSHeinrich Schuchardt #define MAX_THREADS FUTEX_TID_MASK 117ac1b398dSHeinrich Schuchardt 118ac1b398dSHeinrich Schuchardt /* 1191da177e4SLinus Torvalds * Protected counters by write_lock_irq(&tasklist_lock) 1201da177e4SLinus Torvalds */ 1211da177e4SLinus Torvalds unsigned long total_forks; /* Handle normal Linux uptimes. */ 1221da177e4SLinus Torvalds int nr_threads; /* The idle threads do not count.. */ 1231da177e4SLinus Torvalds 1241da177e4SLinus Torvalds int max_threads; /* tunable limit on nr_threads */ 1251da177e4SLinus Torvalds 1261da177e4SLinus Torvalds DEFINE_PER_CPU(unsigned long, process_counts) = 0; 1271da177e4SLinus Torvalds 1281da177e4SLinus Torvalds __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ 129db1466b3SPaul E. McKenney 130db1466b3SPaul E. McKenney #ifdef CONFIG_PROVE_RCU 131db1466b3SPaul E. McKenney int lockdep_tasklist_lock_is_held(void) 132db1466b3SPaul E. McKenney { 133db1466b3SPaul E. McKenney return lockdep_is_held(&tasklist_lock); 134db1466b3SPaul E. McKenney } 135db1466b3SPaul E. McKenney EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held); 136db1466b3SPaul E. McKenney #endif /* #ifdef CONFIG_PROVE_RCU */ 1371da177e4SLinus Torvalds 1381da177e4SLinus Torvalds int nr_processes(void) 1391da177e4SLinus Torvalds { 1401da177e4SLinus Torvalds int cpu; 1411da177e4SLinus Torvalds int total = 0; 1421da177e4SLinus Torvalds 1431d510750SIan Campbell for_each_possible_cpu(cpu) 1441da177e4SLinus Torvalds total += per_cpu(process_counts, cpu); 1451da177e4SLinus Torvalds 1461da177e4SLinus Torvalds return total; 1471da177e4SLinus Torvalds } 1481da177e4SLinus Torvalds 149f19b9f74SAkinobu Mita void __weak arch_release_task_struct(struct task_struct *tsk) 150f19b9f74SAkinobu Mita { 151f19b9f74SAkinobu Mita } 152f19b9f74SAkinobu Mita 153f5e10287SThomas Gleixner #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 154e18b890bSChristoph Lameter static struct kmem_cache *task_struct_cachep; 15541101809SThomas Gleixner 15641101809SThomas Gleixner static inline struct task_struct *alloc_task_struct_node(int node) 15741101809SThomas Gleixner { 15841101809SThomas Gleixner return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); 15941101809SThomas Gleixner } 16041101809SThomas Gleixner 16141101809SThomas Gleixner static inline void free_task_struct(struct task_struct *tsk) 16241101809SThomas Gleixner { 16341101809SThomas Gleixner kmem_cache_free(task_struct_cachep, tsk); 16441101809SThomas Gleixner } 1651da177e4SLinus Torvalds #endif 1661da177e4SLinus Torvalds 167b235beeaSLinus Torvalds void __weak arch_release_thread_stack(unsigned long *stack) 168f19b9f74SAkinobu Mita { 169f19b9f74SAkinobu Mita } 170f19b9f74SAkinobu Mita 171b235beeaSLinus Torvalds #ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR 17241101809SThomas Gleixner 1730d15d74aSThomas Gleixner /* 1740d15d74aSThomas Gleixner * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a 1750d15d74aSThomas Gleixner * kmemcache based allocator. 1760d15d74aSThomas Gleixner */ 177ba14a194SAndy Lutomirski # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) 178ac496bf4SAndy Lutomirski 179ac496bf4SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 180ac496bf4SAndy Lutomirski /* 181ac496bf4SAndy Lutomirski * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB 182ac496bf4SAndy Lutomirski * flush. Try to minimize the number of calls by caching stacks. 183ac496bf4SAndy Lutomirski */ 184ac496bf4SAndy Lutomirski #define NR_CACHED_STACKS 2 185ac496bf4SAndy Lutomirski static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]); 18619659c59SHoeun Ryu 18719659c59SHoeun Ryu static int free_vm_stack_cache(unsigned int cpu) 18819659c59SHoeun Ryu { 18919659c59SHoeun Ryu struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu); 19019659c59SHoeun Ryu int i; 19119659c59SHoeun Ryu 19219659c59SHoeun Ryu for (i = 0; i < NR_CACHED_STACKS; i++) { 19319659c59SHoeun Ryu struct vm_struct *vm_stack = cached_vm_stacks[i]; 19419659c59SHoeun Ryu 19519659c59SHoeun Ryu if (!vm_stack) 19619659c59SHoeun Ryu continue; 19719659c59SHoeun Ryu 19819659c59SHoeun Ryu vfree(vm_stack->addr); 19919659c59SHoeun Ryu cached_vm_stacks[i] = NULL; 20019659c59SHoeun Ryu } 20119659c59SHoeun Ryu 20219659c59SHoeun Ryu return 0; 20319659c59SHoeun Ryu } 204ac496bf4SAndy Lutomirski #endif 205ac496bf4SAndy Lutomirski 206ba14a194SAndy Lutomirski static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) 207b69c49b7SFUJITA Tomonori { 208ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 209ac496bf4SAndy Lutomirski void *stack; 210ac496bf4SAndy Lutomirski int i; 211ac496bf4SAndy Lutomirski 212ac496bf4SAndy Lutomirski for (i = 0; i < NR_CACHED_STACKS; i++) { 213112166f8SChristoph Lameter struct vm_struct *s; 214112166f8SChristoph Lameter 215112166f8SChristoph Lameter s = this_cpu_xchg(cached_stacks[i], NULL); 216ac496bf4SAndy Lutomirski 217ac496bf4SAndy Lutomirski if (!s) 218ac496bf4SAndy Lutomirski continue; 219ac496bf4SAndy Lutomirski 220ca182551SKonstantin Khlebnikov /* Clear stale pointers from reused stack. */ 221ca182551SKonstantin Khlebnikov memset(s->addr, 0, THREAD_SIZE); 222e01e8063SKees Cook 223ac496bf4SAndy Lutomirski tsk->stack_vm_area = s; 224ac496bf4SAndy Lutomirski return s->addr; 225ac496bf4SAndy Lutomirski } 226ac496bf4SAndy Lutomirski 2279b6f7e16SRoman Gushchin /* 2289b6f7e16SRoman Gushchin * Allocated stacks are cached and later reused by new threads, 2299b6f7e16SRoman Gushchin * so memcg accounting is performed manually on assigning/releasing 2309b6f7e16SRoman Gushchin * stacks to tasks. Drop __GFP_ACCOUNT. 2319b6f7e16SRoman Gushchin */ 23248ac3c18SMark Rutland stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, 233ba14a194SAndy Lutomirski VMALLOC_START, VMALLOC_END, 2349b6f7e16SRoman Gushchin THREADINFO_GFP & ~__GFP_ACCOUNT, 235ba14a194SAndy Lutomirski PAGE_KERNEL, 236ac496bf4SAndy Lutomirski 0, node, __builtin_return_address(0)); 237ba14a194SAndy Lutomirski 238ba14a194SAndy Lutomirski /* 239ba14a194SAndy Lutomirski * We can't call find_vm_area() in interrupt context, and 240ba14a194SAndy Lutomirski * free_thread_stack() can be called in interrupt context, 241ba14a194SAndy Lutomirski * so cache the vm_struct. 242ba14a194SAndy Lutomirski */ 2435eed6f1dSRik van Riel if (stack) { 244ba14a194SAndy Lutomirski tsk->stack_vm_area = find_vm_area(stack); 2455eed6f1dSRik van Riel tsk->stack = stack; 2465eed6f1dSRik van Riel } 247ba14a194SAndy Lutomirski return stack; 248ba14a194SAndy Lutomirski #else 2494949148aSVladimir Davydov struct page *page = alloc_pages_node(node, THREADINFO_GFP, 2502889f608SThomas Gleixner THREAD_SIZE_ORDER); 251b6a84016SEric Dumazet 252b6a84016SEric Dumazet return page ? page_address(page) : NULL; 253ba14a194SAndy Lutomirski #endif 254b69c49b7SFUJITA Tomonori } 255b69c49b7SFUJITA Tomonori 256ba14a194SAndy Lutomirski static inline void free_thread_stack(struct task_struct *tsk) 257b69c49b7SFUJITA Tomonori { 258ac496bf4SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 2599b6f7e16SRoman Gushchin struct vm_struct *vm = task_stack_vm_area(tsk); 2609b6f7e16SRoman Gushchin 2619b6f7e16SRoman Gushchin if (vm) { 262ac496bf4SAndy Lutomirski int i; 263ac496bf4SAndy Lutomirski 2649b6f7e16SRoman Gushchin for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { 2659b6f7e16SRoman Gushchin mod_memcg_page_state(vm->pages[i], 2669b6f7e16SRoman Gushchin MEMCG_KERNEL_STACK_KB, 2679b6f7e16SRoman Gushchin -(int)(PAGE_SIZE / 1024)); 2689b6f7e16SRoman Gushchin 2699b6f7e16SRoman Gushchin memcg_kmem_uncharge(vm->pages[i], 0); 2709b6f7e16SRoman Gushchin } 2719b6f7e16SRoman Gushchin 272ac496bf4SAndy Lutomirski for (i = 0; i < NR_CACHED_STACKS; i++) { 273112166f8SChristoph Lameter if (this_cpu_cmpxchg(cached_stacks[i], 274112166f8SChristoph Lameter NULL, tsk->stack_vm_area) != NULL) 275ac496bf4SAndy Lutomirski continue; 276ac496bf4SAndy Lutomirski 277ac496bf4SAndy Lutomirski return; 278ac496bf4SAndy Lutomirski } 279ac496bf4SAndy Lutomirski 2800f110a9bSAndrey Ryabinin vfree_atomic(tsk->stack); 281ac496bf4SAndy Lutomirski return; 282ac496bf4SAndy Lutomirski } 283ac496bf4SAndy Lutomirski #endif 284ac496bf4SAndy Lutomirski 285ba14a194SAndy Lutomirski __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER); 286b69c49b7SFUJITA Tomonori } 2870d15d74aSThomas Gleixner # else 288b235beeaSLinus Torvalds static struct kmem_cache *thread_stack_cache; 2890d15d74aSThomas Gleixner 2909521d399SMichael Ellerman static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, 2910d15d74aSThomas Gleixner int node) 2920d15d74aSThomas Gleixner { 2935eed6f1dSRik van Riel unsigned long *stack; 2945eed6f1dSRik van Riel stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); 2955eed6f1dSRik van Riel tsk->stack = stack; 2965eed6f1dSRik van Riel return stack; 2970d15d74aSThomas Gleixner } 2980d15d74aSThomas Gleixner 299ba14a194SAndy Lutomirski static void free_thread_stack(struct task_struct *tsk) 3000d15d74aSThomas Gleixner { 301ba14a194SAndy Lutomirski kmem_cache_free(thread_stack_cache, tsk->stack); 3020d15d74aSThomas Gleixner } 3030d15d74aSThomas Gleixner 304b235beeaSLinus Torvalds void thread_stack_cache_init(void) 3050d15d74aSThomas Gleixner { 306f9d29946SDavid Windsor thread_stack_cache = kmem_cache_create_usercopy("thread_stack", 307f9d29946SDavid Windsor THREAD_SIZE, THREAD_SIZE, 0, 0, 308f9d29946SDavid Windsor THREAD_SIZE, NULL); 309b235beeaSLinus Torvalds BUG_ON(thread_stack_cache == NULL); 3100d15d74aSThomas Gleixner } 3110d15d74aSThomas Gleixner # endif 312b69c49b7SFUJITA Tomonori #endif 313b69c49b7SFUJITA Tomonori 3141da177e4SLinus Torvalds /* SLAB cache for signal_struct structures (tsk->signal) */ 315e18b890bSChristoph Lameter static struct kmem_cache *signal_cachep; 3161da177e4SLinus Torvalds 3171da177e4SLinus Torvalds /* SLAB cache for sighand_struct structures (tsk->sighand) */ 318e18b890bSChristoph Lameter struct kmem_cache *sighand_cachep; 3191da177e4SLinus Torvalds 3201da177e4SLinus Torvalds /* SLAB cache for files_struct structures (tsk->files) */ 321e18b890bSChristoph Lameter struct kmem_cache *files_cachep; 3221da177e4SLinus Torvalds 3231da177e4SLinus Torvalds /* SLAB cache for fs_struct structures (tsk->fs) */ 324e18b890bSChristoph Lameter struct kmem_cache *fs_cachep; 3251da177e4SLinus Torvalds 3261da177e4SLinus Torvalds /* SLAB cache for vm_area_struct structures */ 3273928d4f5SLinus Torvalds static struct kmem_cache *vm_area_cachep; 3281da177e4SLinus Torvalds 3291da177e4SLinus Torvalds /* SLAB cache for mm_struct structures (tsk->mm) */ 330e18b890bSChristoph Lameter static struct kmem_cache *mm_cachep; 3311da177e4SLinus Torvalds 332490fc053SLinus Torvalds struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) 3333928d4f5SLinus Torvalds { 334a670468fSAndrew Morton struct vm_area_struct *vma; 335490fc053SLinus Torvalds 336a670468fSAndrew Morton vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 337027232daSKirill A. Shutemov if (vma) 338027232daSKirill A. Shutemov vma_init(vma, mm); 339490fc053SLinus Torvalds return vma; 3403928d4f5SLinus Torvalds } 3413928d4f5SLinus Torvalds 3423928d4f5SLinus Torvalds struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) 3433928d4f5SLinus Torvalds { 34495faf699SLinus Torvalds struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 34595faf699SLinus Torvalds 34695faf699SLinus Torvalds if (new) { 34795faf699SLinus Torvalds *new = *orig; 34895faf699SLinus Torvalds INIT_LIST_HEAD(&new->anon_vma_chain); 34995faf699SLinus Torvalds } 35095faf699SLinus Torvalds return new; 3513928d4f5SLinus Torvalds } 3523928d4f5SLinus Torvalds 3533928d4f5SLinus Torvalds void vm_area_free(struct vm_area_struct *vma) 3543928d4f5SLinus Torvalds { 3553928d4f5SLinus Torvalds kmem_cache_free(vm_area_cachep, vma); 3563928d4f5SLinus Torvalds } 3573928d4f5SLinus Torvalds 358ba14a194SAndy Lutomirski static void account_kernel_stack(struct task_struct *tsk, int account) 359c6a7f572SKOSAKI Motohiro { 360ba14a194SAndy Lutomirski void *stack = task_stack_page(tsk); 361ba14a194SAndy Lutomirski struct vm_struct *vm = task_stack_vm_area(tsk); 362ba14a194SAndy Lutomirski 363ba14a194SAndy Lutomirski BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0); 364ba14a194SAndy Lutomirski 365ba14a194SAndy Lutomirski if (vm) { 366ba14a194SAndy Lutomirski int i; 367ba14a194SAndy Lutomirski 368ba14a194SAndy Lutomirski BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE); 369ba14a194SAndy Lutomirski 370ba14a194SAndy Lutomirski for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { 371ba14a194SAndy Lutomirski mod_zone_page_state(page_zone(vm->pages[i]), 372ba14a194SAndy Lutomirski NR_KERNEL_STACK_KB, 373ba14a194SAndy Lutomirski PAGE_SIZE / 1024 * account); 374ba14a194SAndy Lutomirski } 375ba14a194SAndy Lutomirski } else { 376ba14a194SAndy Lutomirski /* 377ba14a194SAndy Lutomirski * All stack pages are in the same zone and belong to the 378ba14a194SAndy Lutomirski * same memcg. 379ba14a194SAndy Lutomirski */ 380efdc9490SAndy Lutomirski struct page *first_page = virt_to_page(stack); 381c6a7f572SKOSAKI Motohiro 382efdc9490SAndy Lutomirski mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB, 383d30dd8beSAndy Lutomirski THREAD_SIZE / 1024 * account); 384efdc9490SAndy Lutomirski 385ed52be7bSJohannes Weiner mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB, 386efdc9490SAndy Lutomirski account * (THREAD_SIZE / 1024)); 387c6a7f572SKOSAKI Motohiro } 388ba14a194SAndy Lutomirski } 389c6a7f572SKOSAKI Motohiro 3909b6f7e16SRoman Gushchin static int memcg_charge_kernel_stack(struct task_struct *tsk) 3919b6f7e16SRoman Gushchin { 3929b6f7e16SRoman Gushchin #ifdef CONFIG_VMAP_STACK 3939b6f7e16SRoman Gushchin struct vm_struct *vm = task_stack_vm_area(tsk); 3949b6f7e16SRoman Gushchin int ret; 3959b6f7e16SRoman Gushchin 3969b6f7e16SRoman Gushchin if (vm) { 3979b6f7e16SRoman Gushchin int i; 3989b6f7e16SRoman Gushchin 3999b6f7e16SRoman Gushchin for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { 4009b6f7e16SRoman Gushchin /* 4019b6f7e16SRoman Gushchin * If memcg_kmem_charge() fails, page->mem_cgroup 4029b6f7e16SRoman Gushchin * pointer is NULL, and both memcg_kmem_uncharge() 4039b6f7e16SRoman Gushchin * and mod_memcg_page_state() in free_thread_stack() 4049b6f7e16SRoman Gushchin * will ignore this page. So it's safe. 4059b6f7e16SRoman Gushchin */ 4069b6f7e16SRoman Gushchin ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0); 4079b6f7e16SRoman Gushchin if (ret) 4089b6f7e16SRoman Gushchin return ret; 4099b6f7e16SRoman Gushchin 4109b6f7e16SRoman Gushchin mod_memcg_page_state(vm->pages[i], 4119b6f7e16SRoman Gushchin MEMCG_KERNEL_STACK_KB, 4129b6f7e16SRoman Gushchin PAGE_SIZE / 1024); 4139b6f7e16SRoman Gushchin } 4149b6f7e16SRoman Gushchin } 4159b6f7e16SRoman Gushchin #endif 4169b6f7e16SRoman Gushchin return 0; 4179b6f7e16SRoman Gushchin } 4189b6f7e16SRoman Gushchin 41968f24b08SAndy Lutomirski static void release_task_stack(struct task_struct *tsk) 4201da177e4SLinus Torvalds { 421405c0759SAndy Lutomirski if (WARN_ON(tsk->state != TASK_DEAD)) 422405c0759SAndy Lutomirski return; /* Better to leak the stack than to free prematurely */ 423405c0759SAndy Lutomirski 424ba14a194SAndy Lutomirski account_kernel_stack(tsk, -1); 425b235beeaSLinus Torvalds arch_release_thread_stack(tsk->stack); 426ba14a194SAndy Lutomirski free_thread_stack(tsk); 42768f24b08SAndy Lutomirski tsk->stack = NULL; 42868f24b08SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 42968f24b08SAndy Lutomirski tsk->stack_vm_area = NULL; 43068f24b08SAndy Lutomirski #endif 43168f24b08SAndy Lutomirski } 43268f24b08SAndy Lutomirski 43368f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 43468f24b08SAndy Lutomirski void put_task_stack(struct task_struct *tsk) 43568f24b08SAndy Lutomirski { 43668f24b08SAndy Lutomirski if (atomic_dec_and_test(&tsk->stack_refcount)) 43768f24b08SAndy Lutomirski release_task_stack(tsk); 43868f24b08SAndy Lutomirski } 43968f24b08SAndy Lutomirski #endif 44068f24b08SAndy Lutomirski 44168f24b08SAndy Lutomirski void free_task(struct task_struct *tsk) 44268f24b08SAndy Lutomirski { 44368f24b08SAndy Lutomirski #ifndef CONFIG_THREAD_INFO_IN_TASK 44468f24b08SAndy Lutomirski /* 44568f24b08SAndy Lutomirski * The task is finally done with both the stack and thread_info, 44668f24b08SAndy Lutomirski * so free both. 44768f24b08SAndy Lutomirski */ 44868f24b08SAndy Lutomirski release_task_stack(tsk); 44968f24b08SAndy Lutomirski #else 45068f24b08SAndy Lutomirski /* 45168f24b08SAndy Lutomirski * If the task had a separate stack allocation, it should be gone 45268f24b08SAndy Lutomirski * by now. 45368f24b08SAndy Lutomirski */ 45468f24b08SAndy Lutomirski WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0); 45568f24b08SAndy Lutomirski #endif 45623f78d4aSIngo Molnar rt_mutex_debug_task_free(tsk); 457fb52607aSFrederic Weisbecker ftrace_graph_exit_task(tsk); 458e2cfabdfSWill Drewry put_seccomp_filter(tsk); 459f19b9f74SAkinobu Mita arch_release_task_struct(tsk); 4601da5c46fSOleg Nesterov if (tsk->flags & PF_KTHREAD) 4611da5c46fSOleg Nesterov free_kthread_struct(tsk); 4621da177e4SLinus Torvalds free_task_struct(tsk); 4631da177e4SLinus Torvalds } 4641da177e4SLinus Torvalds EXPORT_SYMBOL(free_task); 4651da177e4SLinus Torvalds 4661da177e4SLinus Torvalds #ifdef CONFIG_MMU 4670766f788SEmese Revfy static __latent_entropy int dup_mmap(struct mm_struct *mm, 4680766f788SEmese Revfy struct mm_struct *oldmm) 4691da177e4SLinus Torvalds { 470297c5eeeSLinus Torvalds struct vm_area_struct *mpnt, *tmp, *prev, **pprev; 4711da177e4SLinus Torvalds struct rb_node **rb_link, *rb_parent; 4721da177e4SLinus Torvalds int retval; 4731da177e4SLinus Torvalds unsigned long charge; 474893e26e6SPavel Emelyanov LIST_HEAD(uf); 4751da177e4SLinus Torvalds 47632cdba1eSOleg Nesterov uprobe_start_dup_mmap(); 4777c051267SMichal Hocko if (down_write_killable(&oldmm->mmap_sem)) { 4787c051267SMichal Hocko retval = -EINTR; 4797c051267SMichal Hocko goto fail_uprobe_end; 4807c051267SMichal Hocko } 481ec8c0446SRalf Baechle flush_cache_dup_mm(oldmm); 482f8ac4ec9SOleg Nesterov uprobe_dup_mmap(oldmm, mm); 483ad339451SIngo Molnar /* 484ad339451SIngo Molnar * Not linked in yet - no deadlock potential: 485ad339451SIngo Molnar */ 486ad339451SIngo Molnar down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); 4877ee78232SHugh Dickins 48890f31d0eSKonstantin Khlebnikov /* No ordering required: file already has been exposed. */ 48990f31d0eSKonstantin Khlebnikov RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); 49090f31d0eSKonstantin Khlebnikov 4914f7d4614SVladimir Davydov mm->total_vm = oldmm->total_vm; 49284638335SKonstantin Khlebnikov mm->data_vm = oldmm->data_vm; 4934f7d4614SVladimir Davydov mm->exec_vm = oldmm->exec_vm; 4944f7d4614SVladimir Davydov mm->stack_vm = oldmm->stack_vm; 4954f7d4614SVladimir Davydov 4961da177e4SLinus Torvalds rb_link = &mm->mm_rb.rb_node; 4971da177e4SLinus Torvalds rb_parent = NULL; 4981da177e4SLinus Torvalds pprev = &mm->mmap; 499f8af4da3SHugh Dickins retval = ksm_fork(mm, oldmm); 500f8af4da3SHugh Dickins if (retval) 501f8af4da3SHugh Dickins goto out; 502ba76149fSAndrea Arcangeli retval = khugepaged_fork(mm, oldmm); 503ba76149fSAndrea Arcangeli if (retval) 504ba76149fSAndrea Arcangeli goto out; 5051da177e4SLinus Torvalds 506297c5eeeSLinus Torvalds prev = NULL; 507fd3e42fcSHugh Dickins for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { 5081da177e4SLinus Torvalds struct file *file; 5091da177e4SLinus Torvalds 5101da177e4SLinus Torvalds if (mpnt->vm_flags & VM_DONTCOPY) { 51184638335SKonstantin Khlebnikov vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); 5121da177e4SLinus Torvalds continue; 5131da177e4SLinus Torvalds } 5141da177e4SLinus Torvalds charge = 0; 515655c79bbSTetsuo Handa /* 516655c79bbSTetsuo Handa * Don't duplicate many vmas if we've been oom-killed (for 517655c79bbSTetsuo Handa * example) 518655c79bbSTetsuo Handa */ 519655c79bbSTetsuo Handa if (fatal_signal_pending(current)) { 520655c79bbSTetsuo Handa retval = -EINTR; 521655c79bbSTetsuo Handa goto out; 522655c79bbSTetsuo Handa } 5231da177e4SLinus Torvalds if (mpnt->vm_flags & VM_ACCOUNT) { 524b2412b7fSHuang Shijie unsigned long len = vma_pages(mpnt); 525b2412b7fSHuang Shijie 526191c5424SAl Viro if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ 5271da177e4SLinus Torvalds goto fail_nomem; 5281da177e4SLinus Torvalds charge = len; 5291da177e4SLinus Torvalds } 5303928d4f5SLinus Torvalds tmp = vm_area_dup(mpnt); 5311da177e4SLinus Torvalds if (!tmp) 5321da177e4SLinus Torvalds goto fail_nomem; 533ef0855d3SOleg Nesterov retval = vma_dup_policy(mpnt, tmp); 534ef0855d3SOleg Nesterov if (retval) 5351da177e4SLinus Torvalds goto fail_nomem_policy; 536a247c3a9SAndrea Arcangeli tmp->vm_mm = mm; 537893e26e6SPavel Emelyanov retval = dup_userfaultfd(tmp, &uf); 538893e26e6SPavel Emelyanov if (retval) 539893e26e6SPavel Emelyanov goto fail_nomem_anon_vma_fork; 540d2cd9edeSRik van Riel if (tmp->vm_flags & VM_WIPEONFORK) { 541d2cd9edeSRik van Riel /* VM_WIPEONFORK gets a clean slate in the child. */ 542d2cd9edeSRik van Riel tmp->anon_vma = NULL; 543d2cd9edeSRik van Riel if (anon_vma_prepare(tmp)) 544d2cd9edeSRik van Riel goto fail_nomem_anon_vma_fork; 545d2cd9edeSRik van Riel } else if (anon_vma_fork(tmp, mpnt)) 5465beb4930SRik van Riel goto fail_nomem_anon_vma_fork; 547893e26e6SPavel Emelyanov tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); 548297c5eeeSLinus Torvalds tmp->vm_next = tmp->vm_prev = NULL; 5491da177e4SLinus Torvalds file = tmp->vm_file; 5501da177e4SLinus Torvalds if (file) { 551496ad9aaSAl Viro struct inode *inode = file_inode(file); 552b88ed205SHugh Dickins struct address_space *mapping = file->f_mapping; 553b88ed205SHugh Dickins 5541da177e4SLinus Torvalds get_file(file); 5551da177e4SLinus Torvalds if (tmp->vm_flags & VM_DENYWRITE) 5561da177e4SLinus Torvalds atomic_dec(&inode->i_writecount); 55783cde9e8SDavidlohr Bueso i_mmap_lock_write(mapping); 558b88ed205SHugh Dickins if (tmp->vm_flags & VM_SHARED) 5594bb5f5d9SDavid Herrmann atomic_inc(&mapping->i_mmap_writable); 560b88ed205SHugh Dickins flush_dcache_mmap_lock(mapping); 561b88ed205SHugh Dickins /* insert tmp into the share list, just after mpnt */ 5629826a516SMichel Lespinasse vma_interval_tree_insert_after(tmp, mpnt, 5639826a516SMichel Lespinasse &mapping->i_mmap); 564b88ed205SHugh Dickins flush_dcache_mmap_unlock(mapping); 56583cde9e8SDavidlohr Bueso i_mmap_unlock_write(mapping); 5661da177e4SLinus Torvalds } 5671da177e4SLinus Torvalds 5681da177e4SLinus Torvalds /* 569a1e78772SMel Gorman * Clear hugetlb-related page reserves for children. This only 570a1e78772SMel Gorman * affects MAP_PRIVATE mappings. Faults generated by the child 571a1e78772SMel Gorman * are not guaranteed to succeed, even if read-only 572a1e78772SMel Gorman */ 573a1e78772SMel Gorman if (is_vm_hugetlb_page(tmp)) 574a1e78772SMel Gorman reset_vma_resv_huge_pages(tmp); 575a1e78772SMel Gorman 576a1e78772SMel Gorman /* 5777ee78232SHugh Dickins * Link in the new vma and copy the page table entries. 5781da177e4SLinus Torvalds */ 5791da177e4SLinus Torvalds *pprev = tmp; 5801da177e4SLinus Torvalds pprev = &tmp->vm_next; 581297c5eeeSLinus Torvalds tmp->vm_prev = prev; 582297c5eeeSLinus Torvalds prev = tmp; 5831da177e4SLinus Torvalds 5841da177e4SLinus Torvalds __vma_link_rb(mm, tmp, rb_link, rb_parent); 5851da177e4SLinus Torvalds rb_link = &tmp->vm_rb.rb_right; 5861da177e4SLinus Torvalds rb_parent = &tmp->vm_rb; 5871da177e4SLinus Torvalds 5881da177e4SLinus Torvalds mm->map_count++; 589d2cd9edeSRik van Riel if (!(tmp->vm_flags & VM_WIPEONFORK)) 5900b0db14cSHugh Dickins retval = copy_page_range(mm, oldmm, mpnt); 5911da177e4SLinus Torvalds 5921da177e4SLinus Torvalds if (tmp->vm_ops && tmp->vm_ops->open) 5931da177e4SLinus Torvalds tmp->vm_ops->open(tmp); 5941da177e4SLinus Torvalds 5951da177e4SLinus Torvalds if (retval) 5961da177e4SLinus Torvalds goto out; 5971da177e4SLinus Torvalds } 598d6dd61c8SJeremy Fitzhardinge /* a new mm has just been created */ 5991ed0cc5aSNadav Amit retval = arch_dup_mmap(oldmm, mm); 6001da177e4SLinus Torvalds out: 6017ee78232SHugh Dickins up_write(&mm->mmap_sem); 602fd3e42fcSHugh Dickins flush_tlb_mm(oldmm); 6031da177e4SLinus Torvalds up_write(&oldmm->mmap_sem); 604893e26e6SPavel Emelyanov dup_userfaultfd_complete(&uf); 6057c051267SMichal Hocko fail_uprobe_end: 60632cdba1eSOleg Nesterov uprobe_end_dup_mmap(); 6071da177e4SLinus Torvalds return retval; 6085beb4930SRik van Riel fail_nomem_anon_vma_fork: 609ef0855d3SOleg Nesterov mpol_put(vma_policy(tmp)); 6101da177e4SLinus Torvalds fail_nomem_policy: 6113928d4f5SLinus Torvalds vm_area_free(tmp); 6121da177e4SLinus Torvalds fail_nomem: 6131da177e4SLinus Torvalds retval = -ENOMEM; 6141da177e4SLinus Torvalds vm_unacct_memory(charge); 6151da177e4SLinus Torvalds goto out; 6161da177e4SLinus Torvalds } 6171da177e4SLinus Torvalds 6181da177e4SLinus Torvalds static inline int mm_alloc_pgd(struct mm_struct *mm) 6191da177e4SLinus Torvalds { 6201da177e4SLinus Torvalds mm->pgd = pgd_alloc(mm); 6211da177e4SLinus Torvalds if (unlikely(!mm->pgd)) 6221da177e4SLinus Torvalds return -ENOMEM; 6231da177e4SLinus Torvalds return 0; 6241da177e4SLinus Torvalds } 6251da177e4SLinus Torvalds 6261da177e4SLinus Torvalds static inline void mm_free_pgd(struct mm_struct *mm) 6271da177e4SLinus Torvalds { 6285e541973SBenjamin Herrenschmidt pgd_free(mm, mm->pgd); 6291da177e4SLinus Torvalds } 6301da177e4SLinus Torvalds #else 63190f31d0eSKonstantin Khlebnikov static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) 63290f31d0eSKonstantin Khlebnikov { 63390f31d0eSKonstantin Khlebnikov down_write(&oldmm->mmap_sem); 63490f31d0eSKonstantin Khlebnikov RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); 63590f31d0eSKonstantin Khlebnikov up_write(&oldmm->mmap_sem); 63690f31d0eSKonstantin Khlebnikov return 0; 63790f31d0eSKonstantin Khlebnikov } 6381da177e4SLinus Torvalds #define mm_alloc_pgd(mm) (0) 6391da177e4SLinus Torvalds #define mm_free_pgd(mm) 6401da177e4SLinus Torvalds #endif /* CONFIG_MMU */ 6411da177e4SLinus Torvalds 642d70f2a14SAndrew Morton static void check_mm(struct mm_struct *mm) 643d70f2a14SAndrew Morton { 644d70f2a14SAndrew Morton int i; 645d70f2a14SAndrew Morton 646d70f2a14SAndrew Morton for (i = 0; i < NR_MM_COUNTERS; i++) { 647d70f2a14SAndrew Morton long x = atomic_long_read(&mm->rss_stat.count[i]); 648d70f2a14SAndrew Morton 649d70f2a14SAndrew Morton if (unlikely(x)) 650d70f2a14SAndrew Morton printk(KERN_ALERT "BUG: Bad rss-counter state " 651d70f2a14SAndrew Morton "mm:%p idx:%d val:%ld\n", mm, i, x); 652d70f2a14SAndrew Morton } 653d70f2a14SAndrew Morton 654d70f2a14SAndrew Morton if (mm_pgtables_bytes(mm)) 655d70f2a14SAndrew Morton pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n", 656d70f2a14SAndrew Morton mm_pgtables_bytes(mm)); 657d70f2a14SAndrew Morton 658d70f2a14SAndrew Morton #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 659d70f2a14SAndrew Morton VM_BUG_ON_MM(mm->pmd_huge_pte, mm); 660d70f2a14SAndrew Morton #endif 661d70f2a14SAndrew Morton } 6621da177e4SLinus Torvalds 663e94b1766SChristoph Lameter #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) 6641da177e4SLinus Torvalds #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) 6651da177e4SLinus Torvalds 666d70f2a14SAndrew Morton /* 667d70f2a14SAndrew Morton * Called when the last reference to the mm 668d70f2a14SAndrew Morton * is dropped: either by a lazy thread or by 669d70f2a14SAndrew Morton * mmput. Free the page directory and the mm. 670d70f2a14SAndrew Morton */ 671d34bc48fSAndrew Morton void __mmdrop(struct mm_struct *mm) 672d70f2a14SAndrew Morton { 673d70f2a14SAndrew Morton BUG_ON(mm == &init_mm); 6743eda69c9SMark Rutland WARN_ON_ONCE(mm == current->mm); 6753eda69c9SMark Rutland WARN_ON_ONCE(mm == current->active_mm); 676d70f2a14SAndrew Morton mm_free_pgd(mm); 677d70f2a14SAndrew Morton destroy_context(mm); 678d70f2a14SAndrew Morton hmm_mm_destroy(mm); 679d70f2a14SAndrew Morton mmu_notifier_mm_destroy(mm); 680d70f2a14SAndrew Morton check_mm(mm); 681d70f2a14SAndrew Morton put_user_ns(mm->user_ns); 682d70f2a14SAndrew Morton free_mm(mm); 683d70f2a14SAndrew Morton } 684d34bc48fSAndrew Morton EXPORT_SYMBOL_GPL(__mmdrop); 685d70f2a14SAndrew Morton 686d70f2a14SAndrew Morton static void mmdrop_async_fn(struct work_struct *work) 687d70f2a14SAndrew Morton { 688d70f2a14SAndrew Morton struct mm_struct *mm; 689d70f2a14SAndrew Morton 690d70f2a14SAndrew Morton mm = container_of(work, struct mm_struct, async_put_work); 691d70f2a14SAndrew Morton __mmdrop(mm); 692d70f2a14SAndrew Morton } 693d70f2a14SAndrew Morton 694d70f2a14SAndrew Morton static void mmdrop_async(struct mm_struct *mm) 695d70f2a14SAndrew Morton { 696d70f2a14SAndrew Morton if (unlikely(atomic_dec_and_test(&mm->mm_count))) { 697d70f2a14SAndrew Morton INIT_WORK(&mm->async_put_work, mmdrop_async_fn); 698d70f2a14SAndrew Morton schedule_work(&mm->async_put_work); 699d70f2a14SAndrew Morton } 700d70f2a14SAndrew Morton } 701d70f2a14SAndrew Morton 7021da177e4SLinus Torvalds static inline void free_signal_struct(struct signal_struct *sig) 7031da177e4SLinus Torvalds { 7041da177e4SLinus Torvalds taskstats_tgid_free(sig); 7051da177e4SLinus Torvalds sched_autogroup_exit(sig); 7061da177e4SLinus Torvalds /* 7071da177e4SLinus Torvalds * __mmdrop is not safe to call from softirq context on x86 due to 7081da177e4SLinus Torvalds * pgd_dtor so postpone it to the async context 7091da177e4SLinus Torvalds */ 7101da177e4SLinus Torvalds if (sig->oom_mm) 7111da177e4SLinus Torvalds mmdrop_async(sig->oom_mm); 7121da177e4SLinus Torvalds kmem_cache_free(signal_cachep, sig); 7131da177e4SLinus Torvalds } 7141da177e4SLinus Torvalds 7151da177e4SLinus Torvalds static inline void put_signal_struct(struct signal_struct *sig) 7161da177e4SLinus Torvalds { 7171da177e4SLinus Torvalds if (atomic_dec_and_test(&sig->sigcnt)) 7181da177e4SLinus Torvalds free_signal_struct(sig); 7191da177e4SLinus Torvalds } 7201da177e4SLinus Torvalds 7211da177e4SLinus Torvalds void __put_task_struct(struct task_struct *tsk) 7221da177e4SLinus Torvalds { 7231da177e4SLinus Torvalds WARN_ON(!tsk->exit_state); 7241da177e4SLinus Torvalds WARN_ON(atomic_read(&tsk->usage)); 7251da177e4SLinus Torvalds WARN_ON(tsk == current); 7261da177e4SLinus Torvalds 7271da177e4SLinus Torvalds cgroup_free(tsk); 7281da177e4SLinus Torvalds task_numa_free(tsk); 7291da177e4SLinus Torvalds security_task_free(tsk); 7301da177e4SLinus Torvalds exit_creds(tsk); 7311da177e4SLinus Torvalds delayacct_tsk_free(tsk); 7321da177e4SLinus Torvalds put_signal_struct(tsk->signal); 7331da177e4SLinus Torvalds 7341da177e4SLinus Torvalds if (!profile_handoff_task(tsk)) 7351da177e4SLinus Torvalds free_task(tsk); 7361da177e4SLinus Torvalds } 7371da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(__put_task_struct); 7381da177e4SLinus Torvalds 7391da177e4SLinus Torvalds void __init __weak arch_task_cache_init(void) { } 7401da177e4SLinus Torvalds 7411da177e4SLinus Torvalds /* 7421da177e4SLinus Torvalds * set_max_threads 7431da177e4SLinus Torvalds */ 7441da177e4SLinus Torvalds static void set_max_threads(unsigned int max_threads_suggested) 7451da177e4SLinus Torvalds { 7461da177e4SLinus Torvalds u64 threads; 747*3d6357deSArun KS unsigned long nr_pages = totalram_pages; 7481da177e4SLinus Torvalds 7491da177e4SLinus Torvalds /* 7501da177e4SLinus Torvalds * The number of threads shall be limited such that the thread 7511da177e4SLinus Torvalds * structures may only consume a small part of the available memory. 7521da177e4SLinus Torvalds */ 753*3d6357deSArun KS if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64) 7541da177e4SLinus Torvalds threads = MAX_THREADS; 7551da177e4SLinus Torvalds else 756*3d6357deSArun KS threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE, 7571da177e4SLinus Torvalds (u64) THREAD_SIZE * 8UL); 7581da177e4SLinus Torvalds 7591da177e4SLinus Torvalds if (threads > max_threads_suggested) 7601da177e4SLinus Torvalds threads = max_threads_suggested; 7611da177e4SLinus Torvalds 7621da177e4SLinus Torvalds max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); 7631da177e4SLinus Torvalds } 7641da177e4SLinus Torvalds 7651da177e4SLinus Torvalds #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT 7661da177e4SLinus Torvalds /* Initialized by the architecture: */ 7671da177e4SLinus Torvalds int arch_task_struct_size __read_mostly; 7681da177e4SLinus Torvalds #endif 7691da177e4SLinus Torvalds 7705905429aSKees Cook static void task_struct_whitelist(unsigned long *offset, unsigned long *size) 7715905429aSKees Cook { 7725905429aSKees Cook /* Fetch thread_struct whitelist for the architecture. */ 7735905429aSKees Cook arch_thread_struct_whitelist(offset, size); 7745905429aSKees Cook 7755905429aSKees Cook /* 7765905429aSKees Cook * Handle zero-sized whitelist or empty thread_struct, otherwise 7775905429aSKees Cook * adjust offset to position of thread_struct in task_struct. 7785905429aSKees Cook */ 7795905429aSKees Cook if (unlikely(*size == 0)) 7805905429aSKees Cook *offset = 0; 7815905429aSKees Cook else 7825905429aSKees Cook *offset += offsetof(struct task_struct, thread); 7835905429aSKees Cook } 7845905429aSKees Cook 7851da177e4SLinus Torvalds void __init fork_init(void) 7861da177e4SLinus Torvalds { 7871da177e4SLinus Torvalds int i; 7881da177e4SLinus Torvalds #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 7891da177e4SLinus Torvalds #ifndef ARCH_MIN_TASKALIGN 7901da177e4SLinus Torvalds #define ARCH_MIN_TASKALIGN 0 7911da177e4SLinus Torvalds #endif 7921da177e4SLinus Torvalds int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); 7935905429aSKees Cook unsigned long useroffset, usersize; 7941da177e4SLinus Torvalds 7951da177e4SLinus Torvalds /* create a slab on which task_structs can be allocated */ 7965905429aSKees Cook task_struct_whitelist(&useroffset, &usersize); 7975905429aSKees Cook task_struct_cachep = kmem_cache_create_usercopy("task_struct", 7981da177e4SLinus Torvalds arch_task_struct_size, align, 7995905429aSKees Cook SLAB_PANIC|SLAB_ACCOUNT, 8005905429aSKees Cook useroffset, usersize, NULL); 8011da177e4SLinus Torvalds #endif 8021da177e4SLinus Torvalds 8031da177e4SLinus Torvalds /* do the arch specific task caches init */ 8041da177e4SLinus Torvalds arch_task_cache_init(); 8051da177e4SLinus Torvalds 8061da177e4SLinus Torvalds set_max_threads(MAX_THREADS); 8071da177e4SLinus Torvalds 8081da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; 8091da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; 8101da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_SIGPENDING] = 8111da177e4SLinus Torvalds init_task.signal->rlim[RLIMIT_NPROC]; 8121da177e4SLinus Torvalds 8131da177e4SLinus Torvalds for (i = 0; i < UCOUNT_COUNTS; i++) { 8141da177e4SLinus Torvalds init_user_ns.ucount_max[i] = max_threads/2; 8151da177e4SLinus Torvalds } 8161da177e4SLinus Torvalds 8171da177e4SLinus Torvalds #ifdef CONFIG_VMAP_STACK 8181da177e4SLinus Torvalds cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", 8191da177e4SLinus Torvalds NULL, free_vm_stack_cache); 8201da177e4SLinus Torvalds #endif 8211da177e4SLinus Torvalds 8221da177e4SLinus Torvalds lockdep_init_task(&init_task); 8231da177e4SLinus Torvalds } 8241da177e4SLinus Torvalds 8251da177e4SLinus Torvalds int __weak arch_dup_task_struct(struct task_struct *dst, 8261da177e4SLinus Torvalds struct task_struct *src) 8271da177e4SLinus Torvalds { 8281da177e4SLinus Torvalds *dst = *src; 8291da177e4SLinus Torvalds return 0; 8301da177e4SLinus Torvalds } 8311da177e4SLinus Torvalds 8321da177e4SLinus Torvalds void set_task_stack_end_magic(struct task_struct *tsk) 8331da177e4SLinus Torvalds { 8341da177e4SLinus Torvalds unsigned long *stackend; 8351da177e4SLinus Torvalds 8361da177e4SLinus Torvalds stackend = end_of_stack(tsk); 8371da177e4SLinus Torvalds *stackend = STACK_END_MAGIC; /* for overflow detection */ 8381da177e4SLinus Torvalds } 8391da177e4SLinus Torvalds 8401da177e4SLinus Torvalds static struct task_struct *dup_task_struct(struct task_struct *orig, int node) 8411da177e4SLinus Torvalds { 8421da177e4SLinus Torvalds struct task_struct *tsk; 8431da177e4SLinus Torvalds unsigned long *stack; 8441da177e4SLinus Torvalds struct vm_struct *stack_vm_area; 8451da177e4SLinus Torvalds int err; 8461da177e4SLinus Torvalds 8471da177e4SLinus Torvalds if (node == NUMA_NO_NODE) 8481da177e4SLinus Torvalds node = tsk_fork_get_node(orig); 8491da177e4SLinus Torvalds tsk = alloc_task_struct_node(node); 8501da177e4SLinus Torvalds if (!tsk) 8511da177e4SLinus Torvalds return NULL; 8521da177e4SLinus Torvalds 8531da177e4SLinus Torvalds stack = alloc_thread_stack_node(tsk, node); 8541da177e4SLinus Torvalds if (!stack) 8551da177e4SLinus Torvalds goto free_tsk; 8561da177e4SLinus Torvalds 8579b6f7e16SRoman Gushchin if (memcg_charge_kernel_stack(tsk)) 8589b6f7e16SRoman Gushchin goto free_stack; 8599b6f7e16SRoman Gushchin 8601da177e4SLinus Torvalds stack_vm_area = task_stack_vm_area(tsk); 8611da177e4SLinus Torvalds 8621da177e4SLinus Torvalds err = arch_dup_task_struct(tsk, orig); 8631da177e4SLinus Torvalds 8641da177e4SLinus Torvalds /* 8651da177e4SLinus Torvalds * arch_dup_task_struct() clobbers the stack-related fields. Make 8661da177e4SLinus Torvalds * sure they're properly initialized before using any stack-related 8671da177e4SLinus Torvalds * functions again. 8681da177e4SLinus Torvalds */ 8691da177e4SLinus Torvalds tsk->stack = stack; 8701da177e4SLinus Torvalds #ifdef CONFIG_VMAP_STACK 8711da177e4SLinus Torvalds tsk->stack_vm_area = stack_vm_area; 8721da177e4SLinus Torvalds #endif 8731da177e4SLinus Torvalds #ifdef CONFIG_THREAD_INFO_IN_TASK 8741da177e4SLinus Torvalds atomic_set(&tsk->stack_refcount, 1); 8751da177e4SLinus Torvalds #endif 8761da177e4SLinus Torvalds 8771da177e4SLinus Torvalds if (err) 8781da177e4SLinus Torvalds goto free_stack; 8791da177e4SLinus Torvalds 8801da177e4SLinus Torvalds #ifdef CONFIG_SECCOMP 8811da177e4SLinus Torvalds /* 8821da177e4SLinus Torvalds * We must handle setting up seccomp filters once we're under 8831da177e4SLinus Torvalds * the sighand lock in case orig has changed between now and 8841da177e4SLinus Torvalds * then. Until then, filter must be NULL to avoid messing up 8851da177e4SLinus Torvalds * the usage counts on the error path calling free_task. 8861da177e4SLinus Torvalds */ 8871da177e4SLinus Torvalds tsk->seccomp.filter = NULL; 8881da177e4SLinus Torvalds #endif 8891da177e4SLinus Torvalds 8901da177e4SLinus Torvalds setup_thread_stack(tsk, orig); 8911da177e4SLinus Torvalds clear_user_return_notifier(tsk); 8921da177e4SLinus Torvalds clear_tsk_need_resched(tsk); 8931da177e4SLinus Torvalds set_task_stack_end_magic(tsk); 8941da177e4SLinus Torvalds 895050e9baaSLinus Torvalds #ifdef CONFIG_STACKPROTECTOR 8961da177e4SLinus Torvalds tsk->stack_canary = get_random_canary(); 8971da177e4SLinus Torvalds #endif 8981da177e4SLinus Torvalds 8991da177e4SLinus Torvalds /* 9001da177e4SLinus Torvalds * One for us, one for whoever does the "release_task()" (usually 9011da177e4SLinus Torvalds * parent) 9021da177e4SLinus Torvalds */ 9031da177e4SLinus Torvalds atomic_set(&tsk->usage, 2); 9041da177e4SLinus Torvalds #ifdef CONFIG_BLK_DEV_IO_TRACE 9051da177e4SLinus Torvalds tsk->btrace_seq = 0; 9061da177e4SLinus Torvalds #endif 9071da177e4SLinus Torvalds tsk->splice_pipe = NULL; 9081da177e4SLinus Torvalds tsk->task_frag.page = NULL; 9091da177e4SLinus Torvalds tsk->wake_q.next = NULL; 9101da177e4SLinus Torvalds 9111da177e4SLinus Torvalds account_kernel_stack(tsk, 1); 9121da177e4SLinus Torvalds 9131da177e4SLinus Torvalds kcov_task_init(tsk); 9141da177e4SLinus Torvalds 9151da177e4SLinus Torvalds #ifdef CONFIG_FAULT_INJECTION 9161da177e4SLinus Torvalds tsk->fail_nth = 0; 9171da177e4SLinus Torvalds #endif 9181da177e4SLinus Torvalds 9192c323017SJosef Bacik #ifdef CONFIG_BLK_CGROUP 9202c323017SJosef Bacik tsk->throttle_queue = NULL; 9212c323017SJosef Bacik tsk->use_memdelay = 0; 9222c323017SJosef Bacik #endif 9232c323017SJosef Bacik 924d46eb14bSShakeel Butt #ifdef CONFIG_MEMCG 925d46eb14bSShakeel Butt tsk->active_memcg = NULL; 926d46eb14bSShakeel Butt #endif 9271da177e4SLinus Torvalds return tsk; 9281da177e4SLinus Torvalds 9291da177e4SLinus Torvalds free_stack: 9301da177e4SLinus Torvalds free_thread_stack(tsk); 9311da177e4SLinus Torvalds free_tsk: 9321da177e4SLinus Torvalds free_task_struct(tsk); 9331da177e4SLinus Torvalds return NULL; 9341da177e4SLinus Torvalds } 9351da177e4SLinus Torvalds 9361da177e4SLinus Torvalds __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); 9371da177e4SLinus Torvalds 9384cb0e11bSHidehiro Kawai static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; 9394cb0e11bSHidehiro Kawai 9404cb0e11bSHidehiro Kawai static int __init coredump_filter_setup(char *s) 9414cb0e11bSHidehiro Kawai { 9424cb0e11bSHidehiro Kawai default_dump_filter = 9434cb0e11bSHidehiro Kawai (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & 9444cb0e11bSHidehiro Kawai MMF_DUMP_FILTER_MASK; 9454cb0e11bSHidehiro Kawai return 1; 9464cb0e11bSHidehiro Kawai } 9474cb0e11bSHidehiro Kawai 9484cb0e11bSHidehiro Kawai __setup("coredump_filter=", coredump_filter_setup); 9494cb0e11bSHidehiro Kawai 9501da177e4SLinus Torvalds #include <linux/init_task.h> 9511da177e4SLinus Torvalds 952858f0993SAlexey Dobriyan static void mm_init_aio(struct mm_struct *mm) 953858f0993SAlexey Dobriyan { 954858f0993SAlexey Dobriyan #ifdef CONFIG_AIO 955858f0993SAlexey Dobriyan spin_lock_init(&mm->ioctx_lock); 956db446a08SBenjamin LaHaise mm->ioctx_table = NULL; 957858f0993SAlexey Dobriyan #endif 958858f0993SAlexey Dobriyan } 959858f0993SAlexey Dobriyan 96033144e84SVladimir Davydov static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) 96133144e84SVladimir Davydov { 96233144e84SVladimir Davydov #ifdef CONFIG_MEMCG 96333144e84SVladimir Davydov mm->owner = p; 96433144e84SVladimir Davydov #endif 96533144e84SVladimir Davydov } 96633144e84SVladimir Davydov 967355627f5SEric Biggers static void mm_init_uprobes_state(struct mm_struct *mm) 968355627f5SEric Biggers { 969355627f5SEric Biggers #ifdef CONFIG_UPROBES 970355627f5SEric Biggers mm->uprobes_state.xol_area = NULL; 971355627f5SEric Biggers #endif 972355627f5SEric Biggers } 973355627f5SEric Biggers 974bfedb589SEric W. Biederman static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, 975bfedb589SEric W. Biederman struct user_namespace *user_ns) 9761da177e4SLinus Torvalds { 97741f727fdSVladimir Davydov mm->mmap = NULL; 97841f727fdSVladimir Davydov mm->mm_rb = RB_ROOT; 97941f727fdSVladimir Davydov mm->vmacache_seqnum = 0; 9801da177e4SLinus Torvalds atomic_set(&mm->mm_users, 1); 9811da177e4SLinus Torvalds atomic_set(&mm->mm_count, 1); 9821da177e4SLinus Torvalds init_rwsem(&mm->mmap_sem); 9831da177e4SLinus Torvalds INIT_LIST_HEAD(&mm->mmlist); 984999d9fc1SOleg Nesterov mm->core_state = NULL; 985af5b0f6aSKirill A. Shutemov mm_pgtables_bytes_init(mm); 98641f727fdSVladimir Davydov mm->map_count = 0; 98741f727fdSVladimir Davydov mm->locked_vm = 0; 988ce65cefaSVladimir Davydov mm->pinned_vm = 0; 989d559db08SKAMEZAWA Hiroyuki memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); 9901da177e4SLinus Torvalds spin_lock_init(&mm->page_table_lock); 99188aa7cc6SYang Shi spin_lock_init(&mm->arg_lock); 99241f727fdSVladimir Davydov mm_init_cpumask(mm); 993858f0993SAlexey Dobriyan mm_init_aio(mm); 994cf475ad2SBalbir Singh mm_init_owner(mm, p); 9952b7e8665SEric Biggers RCU_INIT_POINTER(mm->exe_file, NULL); 99641f727fdSVladimir Davydov mmu_notifier_mm_init(mm); 997133ff0eaSJérôme Glisse hmm_mm_init(mm); 99816af97dcSNadav Amit init_tlb_flush_pending(mm); 99941f727fdSVladimir Davydov #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 100041f727fdSVladimir Davydov mm->pmd_huge_pte = NULL; 100141f727fdSVladimir Davydov #endif 1002355627f5SEric Biggers mm_init_uprobes_state(mm); 10031da177e4SLinus Torvalds 1004a0715cc2SAlex Thorlton if (current->mm) { 1005a0715cc2SAlex Thorlton mm->flags = current->mm->flags & MMF_INIT_MASK; 1006a0715cc2SAlex Thorlton mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; 1007a0715cc2SAlex Thorlton } else { 1008a0715cc2SAlex Thorlton mm->flags = default_dump_filter; 10091da177e4SLinus Torvalds mm->def_flags = 0; 1010a0715cc2SAlex Thorlton } 1011a0715cc2SAlex Thorlton 101241f727fdSVladimir Davydov if (mm_alloc_pgd(mm)) 101341f727fdSVladimir Davydov goto fail_nopgd; 101478fb7466SPavel Emelianov 101541f727fdSVladimir Davydov if (init_new_context(p, mm)) 101641f727fdSVladimir Davydov goto fail_nocontext; 101741f727fdSVladimir Davydov 1018bfedb589SEric W. Biederman mm->user_ns = get_user_ns(user_ns); 101941f727fdSVladimir Davydov return mm; 102041f727fdSVladimir Davydov 102141f727fdSVladimir Davydov fail_nocontext: 102241f727fdSVladimir Davydov mm_free_pgd(mm); 102341f727fdSVladimir Davydov fail_nopgd: 10241da177e4SLinus Torvalds free_mm(mm); 10251da177e4SLinus Torvalds return NULL; 10261da177e4SLinus Torvalds } 10271da177e4SLinus Torvalds 10281da177e4SLinus Torvalds /* 10291da177e4SLinus Torvalds * Allocate and initialize an mm_struct. 10301da177e4SLinus Torvalds */ 10311da177e4SLinus Torvalds struct mm_struct *mm_alloc(void) 10321da177e4SLinus Torvalds { 10331da177e4SLinus Torvalds struct mm_struct *mm; 10341da177e4SLinus Torvalds 10351da177e4SLinus Torvalds mm = allocate_mm(); 1036de03c72cSKOSAKI Motohiro if (!mm) 1037de03c72cSKOSAKI Motohiro return NULL; 1038de03c72cSKOSAKI Motohiro 10391da177e4SLinus Torvalds memset(mm, 0, sizeof(*mm)); 1040bfedb589SEric W. Biederman return mm_init(mm, current, current_user_ns()); 10411da177e4SLinus Torvalds } 10421da177e4SLinus Torvalds 1043ec8d7c14SMichal Hocko static inline void __mmput(struct mm_struct *mm) 10441da177e4SLinus Torvalds { 1045ec8d7c14SMichal Hocko VM_BUG_ON(atomic_read(&mm->mm_users)); 10460ae26f1bSAndrew Morton 1047d4b3b638SSrikar Dronamraju uprobe_clear_state(mm); 10481da177e4SLinus Torvalds exit_aio(mm); 10491c2fb7a4SAndrea Arcangeli ksm_exit(mm); 1050ba76149fSAndrea Arcangeli khugepaged_exit(mm); /* must run before exit_mmap */ 10511da177e4SLinus Torvalds exit_mmap(mm); 10526fcb52a5SAaron Lu mm_put_huge_zero_page(mm); 1053925d1c40SMatt Helsley set_mm_exe_file(mm, NULL); 10541da177e4SLinus Torvalds if (!list_empty(&mm->mmlist)) { 10551da177e4SLinus Torvalds spin_lock(&mmlist_lock); 10561da177e4SLinus Torvalds list_del(&mm->mmlist); 10571da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 10581da177e4SLinus Torvalds } 1059801460d0SHiroshi Shimamoto if (mm->binfmt) 1060801460d0SHiroshi Shimamoto module_put(mm->binfmt->module); 10611da177e4SLinus Torvalds mmdrop(mm); 10621da177e4SLinus Torvalds } 1063ec8d7c14SMichal Hocko 1064ec8d7c14SMichal Hocko /* 1065ec8d7c14SMichal Hocko * Decrement the use count and release all resources for an mm. 1066ec8d7c14SMichal Hocko */ 1067ec8d7c14SMichal Hocko void mmput(struct mm_struct *mm) 1068ec8d7c14SMichal Hocko { 1069ec8d7c14SMichal Hocko might_sleep(); 1070ec8d7c14SMichal Hocko 1071ec8d7c14SMichal Hocko if (atomic_dec_and_test(&mm->mm_users)) 1072ec8d7c14SMichal Hocko __mmput(mm); 10731da177e4SLinus Torvalds } 10741da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(mmput); 10751da177e4SLinus Torvalds 1076a1b2289cSSherry Yang #ifdef CONFIG_MMU 1077a1b2289cSSherry Yang static void mmput_async_fn(struct work_struct *work) 1078a1b2289cSSherry Yang { 1079a1b2289cSSherry Yang struct mm_struct *mm = container_of(work, struct mm_struct, 1080a1b2289cSSherry Yang async_put_work); 1081a1b2289cSSherry Yang 1082a1b2289cSSherry Yang __mmput(mm); 1083a1b2289cSSherry Yang } 1084a1b2289cSSherry Yang 1085a1b2289cSSherry Yang void mmput_async(struct mm_struct *mm) 1086a1b2289cSSherry Yang { 1087a1b2289cSSherry Yang if (atomic_dec_and_test(&mm->mm_users)) { 1088a1b2289cSSherry Yang INIT_WORK(&mm->async_put_work, mmput_async_fn); 1089a1b2289cSSherry Yang schedule_work(&mm->async_put_work); 1090a1b2289cSSherry Yang } 1091a1b2289cSSherry Yang } 1092a1b2289cSSherry Yang #endif 1093a1b2289cSSherry Yang 109490f31d0eSKonstantin Khlebnikov /** 109590f31d0eSKonstantin Khlebnikov * set_mm_exe_file - change a reference to the mm's executable file 109690f31d0eSKonstantin Khlebnikov * 109790f31d0eSKonstantin Khlebnikov * This changes mm's executable file (shown as symlink /proc/[pid]/exe). 109890f31d0eSKonstantin Khlebnikov * 10996e399cd1SDavidlohr Bueso * Main users are mmput() and sys_execve(). Callers prevent concurrent 11006e399cd1SDavidlohr Bueso * invocations: in mmput() nobody alive left, in execve task is single 11016e399cd1SDavidlohr Bueso * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the 11026e399cd1SDavidlohr Bueso * mm->exe_file, but does so without using set_mm_exe_file() in order 11036e399cd1SDavidlohr Bueso * to do avoid the need for any locks. 110490f31d0eSKonstantin Khlebnikov */ 110538646013SJiri Slaby void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) 110638646013SJiri Slaby { 11076e399cd1SDavidlohr Bueso struct file *old_exe_file; 11086e399cd1SDavidlohr Bueso 11096e399cd1SDavidlohr Bueso /* 11106e399cd1SDavidlohr Bueso * It is safe to dereference the exe_file without RCU as 11116e399cd1SDavidlohr Bueso * this function is only called if nobody else can access 11126e399cd1SDavidlohr Bueso * this mm -- see comment above for justification. 11136e399cd1SDavidlohr Bueso */ 11146e399cd1SDavidlohr Bueso old_exe_file = rcu_dereference_raw(mm->exe_file); 111590f31d0eSKonstantin Khlebnikov 111638646013SJiri Slaby if (new_exe_file) 111738646013SJiri Slaby get_file(new_exe_file); 111890f31d0eSKonstantin Khlebnikov rcu_assign_pointer(mm->exe_file, new_exe_file); 111990f31d0eSKonstantin Khlebnikov if (old_exe_file) 112090f31d0eSKonstantin Khlebnikov fput(old_exe_file); 112138646013SJiri Slaby } 112238646013SJiri Slaby 112390f31d0eSKonstantin Khlebnikov /** 112490f31d0eSKonstantin Khlebnikov * get_mm_exe_file - acquire a reference to the mm's executable file 112590f31d0eSKonstantin Khlebnikov * 112690f31d0eSKonstantin Khlebnikov * Returns %NULL if mm has no associated executable file. 112790f31d0eSKonstantin Khlebnikov * User must release file via fput(). 112890f31d0eSKonstantin Khlebnikov */ 112938646013SJiri Slaby struct file *get_mm_exe_file(struct mm_struct *mm) 113038646013SJiri Slaby { 113138646013SJiri Slaby struct file *exe_file; 113238646013SJiri Slaby 113390f31d0eSKonstantin Khlebnikov rcu_read_lock(); 113490f31d0eSKonstantin Khlebnikov exe_file = rcu_dereference(mm->exe_file); 113590f31d0eSKonstantin Khlebnikov if (exe_file && !get_file_rcu(exe_file)) 113690f31d0eSKonstantin Khlebnikov exe_file = NULL; 113790f31d0eSKonstantin Khlebnikov rcu_read_unlock(); 113838646013SJiri Slaby return exe_file; 113938646013SJiri Slaby } 114011163348SDavidlohr Bueso EXPORT_SYMBOL(get_mm_exe_file); 114138646013SJiri Slaby 11421da177e4SLinus Torvalds /** 1143cd81a917SMateusz Guzik * get_task_exe_file - acquire a reference to the task's executable file 1144cd81a917SMateusz Guzik * 1145cd81a917SMateusz Guzik * Returns %NULL if task's mm (if any) has no associated executable file or 1146cd81a917SMateusz Guzik * this is a kernel thread with borrowed mm (see the comment above get_task_mm). 1147cd81a917SMateusz Guzik * User must release file via fput(). 1148cd81a917SMateusz Guzik */ 1149cd81a917SMateusz Guzik struct file *get_task_exe_file(struct task_struct *task) 1150cd81a917SMateusz Guzik { 1151cd81a917SMateusz Guzik struct file *exe_file = NULL; 1152cd81a917SMateusz Guzik struct mm_struct *mm; 1153cd81a917SMateusz Guzik 1154cd81a917SMateusz Guzik task_lock(task); 1155cd81a917SMateusz Guzik mm = task->mm; 1156cd81a917SMateusz Guzik if (mm) { 1157cd81a917SMateusz Guzik if (!(task->flags & PF_KTHREAD)) 1158cd81a917SMateusz Guzik exe_file = get_mm_exe_file(mm); 1159cd81a917SMateusz Guzik } 1160cd81a917SMateusz Guzik task_unlock(task); 1161cd81a917SMateusz Guzik return exe_file; 1162cd81a917SMateusz Guzik } 1163cd81a917SMateusz Guzik EXPORT_SYMBOL(get_task_exe_file); 1164cd81a917SMateusz Guzik 1165cd81a917SMateusz Guzik /** 11661da177e4SLinus Torvalds * get_task_mm - acquire a reference to the task's mm 11671da177e4SLinus Torvalds * 1168246bb0b1SOleg Nesterov * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning 11691da177e4SLinus Torvalds * this kernel workthread has transiently adopted a user mm with use_mm, 11701da177e4SLinus Torvalds * to do its AIO) is not set and if so returns a reference to it, after 11711da177e4SLinus Torvalds * bumping up the use count. User must release the mm via mmput() 11721da177e4SLinus Torvalds * after use. Typically used by /proc and ptrace. 11731da177e4SLinus Torvalds */ 11741da177e4SLinus Torvalds struct mm_struct *get_task_mm(struct task_struct *task) 11751da177e4SLinus Torvalds { 11761da177e4SLinus Torvalds struct mm_struct *mm; 11771da177e4SLinus Torvalds 11781da177e4SLinus Torvalds task_lock(task); 11791da177e4SLinus Torvalds mm = task->mm; 11801da177e4SLinus Torvalds if (mm) { 1181246bb0b1SOleg Nesterov if (task->flags & PF_KTHREAD) 11821da177e4SLinus Torvalds mm = NULL; 11831da177e4SLinus Torvalds else 11843fce371bSVegard Nossum mmget(mm); 11851da177e4SLinus Torvalds } 11861da177e4SLinus Torvalds task_unlock(task); 11871da177e4SLinus Torvalds return mm; 11881da177e4SLinus Torvalds } 11891da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(get_task_mm); 11901da177e4SLinus Torvalds 11918cdb878dSChristopher Yeoh struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) 11928cdb878dSChristopher Yeoh { 11938cdb878dSChristopher Yeoh struct mm_struct *mm; 11948cdb878dSChristopher Yeoh int err; 11958cdb878dSChristopher Yeoh 11968cdb878dSChristopher Yeoh err = mutex_lock_killable(&task->signal->cred_guard_mutex); 11978cdb878dSChristopher Yeoh if (err) 11988cdb878dSChristopher Yeoh return ERR_PTR(err); 11998cdb878dSChristopher Yeoh 12008cdb878dSChristopher Yeoh mm = get_task_mm(task); 12018cdb878dSChristopher Yeoh if (mm && mm != current->mm && 12028cdb878dSChristopher Yeoh !ptrace_may_access(task, mode)) { 12038cdb878dSChristopher Yeoh mmput(mm); 12048cdb878dSChristopher Yeoh mm = ERR_PTR(-EACCES); 12058cdb878dSChristopher Yeoh } 12068cdb878dSChristopher Yeoh mutex_unlock(&task->signal->cred_guard_mutex); 12078cdb878dSChristopher Yeoh 12088cdb878dSChristopher Yeoh return mm; 12098cdb878dSChristopher Yeoh } 12108cdb878dSChristopher Yeoh 121157b59c4aSOleg Nesterov static void complete_vfork_done(struct task_struct *tsk) 1212c415c3b4SOleg Nesterov { 1213d68b46feSOleg Nesterov struct completion *vfork; 1214c415c3b4SOleg Nesterov 1215d68b46feSOleg Nesterov task_lock(tsk); 1216d68b46feSOleg Nesterov vfork = tsk->vfork_done; 1217d68b46feSOleg Nesterov if (likely(vfork)) { 1218c415c3b4SOleg Nesterov tsk->vfork_done = NULL; 1219d68b46feSOleg Nesterov complete(vfork); 1220d68b46feSOleg Nesterov } 1221d68b46feSOleg Nesterov task_unlock(tsk); 1222d68b46feSOleg Nesterov } 1223d68b46feSOleg Nesterov 1224d68b46feSOleg Nesterov static int wait_for_vfork_done(struct task_struct *child, 1225d68b46feSOleg Nesterov struct completion *vfork) 1226d68b46feSOleg Nesterov { 1227d68b46feSOleg Nesterov int killed; 1228d68b46feSOleg Nesterov 1229d68b46feSOleg Nesterov freezer_do_not_count(); 1230d68b46feSOleg Nesterov killed = wait_for_completion_killable(vfork); 1231d68b46feSOleg Nesterov freezer_count(); 1232d68b46feSOleg Nesterov 1233d68b46feSOleg Nesterov if (killed) { 1234d68b46feSOleg Nesterov task_lock(child); 1235d68b46feSOleg Nesterov child->vfork_done = NULL; 1236d68b46feSOleg Nesterov task_unlock(child); 1237d68b46feSOleg Nesterov } 1238d68b46feSOleg Nesterov 1239d68b46feSOleg Nesterov put_task_struct(child); 1240d68b46feSOleg Nesterov return killed; 1241c415c3b4SOleg Nesterov } 1242c415c3b4SOleg Nesterov 12431da177e4SLinus Torvalds /* Please note the differences between mmput and mm_release. 12441da177e4SLinus Torvalds * mmput is called whenever we stop holding onto a mm_struct, 12451da177e4SLinus Torvalds * error success whatever. 12461da177e4SLinus Torvalds * 12471da177e4SLinus Torvalds * mm_release is called after a mm_struct has been removed 12481da177e4SLinus Torvalds * from the current process. 12491da177e4SLinus Torvalds * 12501da177e4SLinus Torvalds * This difference is important for error handling, when we 12511da177e4SLinus Torvalds * only half set up a mm_struct for a new process and need to restore 12521da177e4SLinus Torvalds * the old one. Because we mmput the new mm_struct before 12531da177e4SLinus Torvalds * restoring the old one. . . 12541da177e4SLinus Torvalds * Eric Biederman 10 January 1998 12551da177e4SLinus Torvalds */ 12561da177e4SLinus Torvalds void mm_release(struct task_struct *tsk, struct mm_struct *mm) 12571da177e4SLinus Torvalds { 12588141c7f3SLinus Torvalds /* Get rid of any futexes when releasing the mm */ 12598141c7f3SLinus Torvalds #ifdef CONFIG_FUTEX 1260fc6b177dSPeter Zijlstra if (unlikely(tsk->robust_list)) { 12618141c7f3SLinus Torvalds exit_robust_list(tsk); 1262fc6b177dSPeter Zijlstra tsk->robust_list = NULL; 1263fc6b177dSPeter Zijlstra } 12648141c7f3SLinus Torvalds #ifdef CONFIG_COMPAT 1265fc6b177dSPeter Zijlstra if (unlikely(tsk->compat_robust_list)) { 12668141c7f3SLinus Torvalds compat_exit_robust_list(tsk); 1267fc6b177dSPeter Zijlstra tsk->compat_robust_list = NULL; 1268fc6b177dSPeter Zijlstra } 12698141c7f3SLinus Torvalds #endif 1270322a2c10SThomas Gleixner if (unlikely(!list_empty(&tsk->pi_state_list))) 1271322a2c10SThomas Gleixner exit_pi_state_list(tsk); 12728141c7f3SLinus Torvalds #endif 12738141c7f3SLinus Torvalds 12740326f5a9SSrikar Dronamraju uprobe_free_utask(tsk); 12750326f5a9SSrikar Dronamraju 12761da177e4SLinus Torvalds /* Get rid of any cached register state */ 12771da177e4SLinus Torvalds deactivate_mm(tsk, mm); 12781da177e4SLinus Torvalds 1279fec1d011SRoland McGrath /* 1280735f2770SMichal Hocko * Signal userspace if we're not exiting with a core dump 1281735f2770SMichal Hocko * because we want to leave the value intact for debugging 1282735f2770SMichal Hocko * purposes. 1283fec1d011SRoland McGrath */ 12849c8a8228SEric Dumazet if (tsk->clear_child_tid) { 1285735f2770SMichal Hocko if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && 12869c8a8228SEric Dumazet atomic_read(&mm->mm_users) > 1) { 12871da177e4SLinus Torvalds /* 12881da177e4SLinus Torvalds * We don't check the error code - if userspace has 12891da177e4SLinus Torvalds * not set up a proper pointer then tough luck. 12901da177e4SLinus Torvalds */ 12919c8a8228SEric Dumazet put_user(0, tsk->clear_child_tid); 12922de0db99SDominik Brodowski do_futex(tsk->clear_child_tid, FUTEX_WAKE, 12932de0db99SDominik Brodowski 1, NULL, NULL, 0, 0); 12949c8a8228SEric Dumazet } 12959c8a8228SEric Dumazet tsk->clear_child_tid = NULL; 12961da177e4SLinus Torvalds } 1297f7505d64SKonstantin Khlebnikov 1298f7505d64SKonstantin Khlebnikov /* 1299f7505d64SKonstantin Khlebnikov * All done, finally we can wake up parent and return this mm to him. 1300f7505d64SKonstantin Khlebnikov * Also kthread_stop() uses this completion for synchronization. 1301f7505d64SKonstantin Khlebnikov */ 1302f7505d64SKonstantin Khlebnikov if (tsk->vfork_done) 1303f7505d64SKonstantin Khlebnikov complete_vfork_done(tsk); 13041da177e4SLinus Torvalds } 13051da177e4SLinus Torvalds 1306a0a7ec30SJANAK DESAI /* 1307a0a7ec30SJANAK DESAI * Allocate a new mm structure and copy contents from the 1308a0a7ec30SJANAK DESAI * mm structure of the passed in task structure. 1309a0a7ec30SJANAK DESAI */ 1310ff252c1fSDaeSeok Youn static struct mm_struct *dup_mm(struct task_struct *tsk) 1311a0a7ec30SJANAK DESAI { 1312a0a7ec30SJANAK DESAI struct mm_struct *mm, *oldmm = current->mm; 1313a0a7ec30SJANAK DESAI int err; 1314a0a7ec30SJANAK DESAI 1315a0a7ec30SJANAK DESAI mm = allocate_mm(); 1316a0a7ec30SJANAK DESAI if (!mm) 1317a0a7ec30SJANAK DESAI goto fail_nomem; 1318a0a7ec30SJANAK DESAI 1319a0a7ec30SJANAK DESAI memcpy(mm, oldmm, sizeof(*mm)); 1320a0a7ec30SJANAK DESAI 1321bfedb589SEric W. Biederman if (!mm_init(mm, tsk, mm->user_ns)) 1322a0a7ec30SJANAK DESAI goto fail_nomem; 1323a0a7ec30SJANAK DESAI 1324a0a7ec30SJANAK DESAI err = dup_mmap(mm, oldmm); 1325a0a7ec30SJANAK DESAI if (err) 1326a0a7ec30SJANAK DESAI goto free_pt; 1327a0a7ec30SJANAK DESAI 1328a0a7ec30SJANAK DESAI mm->hiwater_rss = get_mm_rss(mm); 1329a0a7ec30SJANAK DESAI mm->hiwater_vm = mm->total_vm; 1330a0a7ec30SJANAK DESAI 1331801460d0SHiroshi Shimamoto if (mm->binfmt && !try_module_get(mm->binfmt->module)) 1332801460d0SHiroshi Shimamoto goto free_pt; 1333801460d0SHiroshi Shimamoto 1334a0a7ec30SJANAK DESAI return mm; 1335a0a7ec30SJANAK DESAI 1336a0a7ec30SJANAK DESAI free_pt: 1337801460d0SHiroshi Shimamoto /* don't put binfmt in mmput, we haven't got module yet */ 1338801460d0SHiroshi Shimamoto mm->binfmt = NULL; 1339a0a7ec30SJANAK DESAI mmput(mm); 1340a0a7ec30SJANAK DESAI 1341a0a7ec30SJANAK DESAI fail_nomem: 1342a0a7ec30SJANAK DESAI return NULL; 1343a0a7ec30SJANAK DESAI } 1344a0a7ec30SJANAK DESAI 13451da177e4SLinus Torvalds static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) 13461da177e4SLinus Torvalds { 13471da177e4SLinus Torvalds struct mm_struct *mm, *oldmm; 13481da177e4SLinus Torvalds int retval; 13491da177e4SLinus Torvalds 13501da177e4SLinus Torvalds tsk->min_flt = tsk->maj_flt = 0; 13511da177e4SLinus Torvalds tsk->nvcsw = tsk->nivcsw = 0; 135217406b82SMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK 135317406b82SMandeep Singh Baines tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; 1354a2e51445SDmitry Vyukov tsk->last_switch_time = 0; 135517406b82SMandeep Singh Baines #endif 13561da177e4SLinus Torvalds 13571da177e4SLinus Torvalds tsk->mm = NULL; 13581da177e4SLinus Torvalds tsk->active_mm = NULL; 13591da177e4SLinus Torvalds 13601da177e4SLinus Torvalds /* 13611da177e4SLinus Torvalds * Are we cloning a kernel thread? 13621da177e4SLinus Torvalds * 13631da177e4SLinus Torvalds * We need to steal a active VM for that.. 13641da177e4SLinus Torvalds */ 13651da177e4SLinus Torvalds oldmm = current->mm; 13661da177e4SLinus Torvalds if (!oldmm) 13671da177e4SLinus Torvalds return 0; 13681da177e4SLinus Torvalds 1369615d6e87SDavidlohr Bueso /* initialize the new vmacache entries */ 1370615d6e87SDavidlohr Bueso vmacache_flush(tsk); 1371615d6e87SDavidlohr Bueso 13721da177e4SLinus Torvalds if (clone_flags & CLONE_VM) { 13733fce371bSVegard Nossum mmget(oldmm); 13741da177e4SLinus Torvalds mm = oldmm; 13751da177e4SLinus Torvalds goto good_mm; 13761da177e4SLinus Torvalds } 13771da177e4SLinus Torvalds 13781da177e4SLinus Torvalds retval = -ENOMEM; 1379a0a7ec30SJANAK DESAI mm = dup_mm(tsk); 13801da177e4SLinus Torvalds if (!mm) 13811da177e4SLinus Torvalds goto fail_nomem; 13821da177e4SLinus Torvalds 13831da177e4SLinus Torvalds good_mm: 13841da177e4SLinus Torvalds tsk->mm = mm; 13851da177e4SLinus Torvalds tsk->active_mm = mm; 13861da177e4SLinus Torvalds return 0; 13871da177e4SLinus Torvalds 13881da177e4SLinus Torvalds fail_nomem: 13891da177e4SLinus Torvalds return retval; 13901da177e4SLinus Torvalds } 13911da177e4SLinus Torvalds 1392a39bc516SAlexey Dobriyan static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) 13931da177e4SLinus Torvalds { 1394498052bbSAl Viro struct fs_struct *fs = current->fs; 13951da177e4SLinus Torvalds if (clone_flags & CLONE_FS) { 1396498052bbSAl Viro /* tsk->fs is already what we want */ 13972a4419b5SNick Piggin spin_lock(&fs->lock); 1398498052bbSAl Viro if (fs->in_exec) { 13992a4419b5SNick Piggin spin_unlock(&fs->lock); 1400498052bbSAl Viro return -EAGAIN; 1401498052bbSAl Viro } 1402498052bbSAl Viro fs->users++; 14032a4419b5SNick Piggin spin_unlock(&fs->lock); 14041da177e4SLinus Torvalds return 0; 14051da177e4SLinus Torvalds } 1406498052bbSAl Viro tsk->fs = copy_fs_struct(fs); 14071da177e4SLinus Torvalds if (!tsk->fs) 14081da177e4SLinus Torvalds return -ENOMEM; 14091da177e4SLinus Torvalds return 0; 14101da177e4SLinus Torvalds } 14111da177e4SLinus Torvalds 1412a016f338SJANAK DESAI static int copy_files(unsigned long clone_flags, struct task_struct *tsk) 1413a016f338SJANAK DESAI { 1414a016f338SJANAK DESAI struct files_struct *oldf, *newf; 1415a016f338SJANAK DESAI int error = 0; 1416a016f338SJANAK DESAI 1417a016f338SJANAK DESAI /* 1418a016f338SJANAK DESAI * A background process may not have any files ... 1419a016f338SJANAK DESAI */ 1420a016f338SJANAK DESAI oldf = current->files; 1421a016f338SJANAK DESAI if (!oldf) 1422a016f338SJANAK DESAI goto out; 1423a016f338SJANAK DESAI 1424a016f338SJANAK DESAI if (clone_flags & CLONE_FILES) { 1425a016f338SJANAK DESAI atomic_inc(&oldf->count); 1426a016f338SJANAK DESAI goto out; 1427a016f338SJANAK DESAI } 1428a016f338SJANAK DESAI 1429a016f338SJANAK DESAI newf = dup_fd(oldf, &error); 1430a016f338SJANAK DESAI if (!newf) 1431a016f338SJANAK DESAI goto out; 1432a016f338SJANAK DESAI 1433a016f338SJANAK DESAI tsk->files = newf; 1434a016f338SJANAK DESAI error = 0; 1435a016f338SJANAK DESAI out: 1436a016f338SJANAK DESAI return error; 1437a016f338SJANAK DESAI } 1438a016f338SJANAK DESAI 1439fadad878SJens Axboe static int copy_io(unsigned long clone_flags, struct task_struct *tsk) 1440fd0928dfSJens Axboe { 1441fd0928dfSJens Axboe #ifdef CONFIG_BLOCK 1442fd0928dfSJens Axboe struct io_context *ioc = current->io_context; 14436e736be7STejun Heo struct io_context *new_ioc; 1444fd0928dfSJens Axboe 1445fd0928dfSJens Axboe if (!ioc) 1446fd0928dfSJens Axboe return 0; 1447fadad878SJens Axboe /* 1448fadad878SJens Axboe * Share io context with parent, if CLONE_IO is set 1449fadad878SJens Axboe */ 1450fadad878SJens Axboe if (clone_flags & CLONE_IO) { 14513d48749dSTejun Heo ioc_task_link(ioc); 14523d48749dSTejun Heo tsk->io_context = ioc; 1453fadad878SJens Axboe } else if (ioprio_valid(ioc->ioprio)) { 14546e736be7STejun Heo new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE); 14556e736be7STejun Heo if (unlikely(!new_ioc)) 1456fd0928dfSJens Axboe return -ENOMEM; 1457fd0928dfSJens Axboe 14586e736be7STejun Heo new_ioc->ioprio = ioc->ioprio; 145911a3122fSTejun Heo put_io_context(new_ioc); 1460fd0928dfSJens Axboe } 1461fd0928dfSJens Axboe #endif 1462fd0928dfSJens Axboe return 0; 1463fd0928dfSJens Axboe } 1464fd0928dfSJens Axboe 1465a39bc516SAlexey Dobriyan static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) 14661da177e4SLinus Torvalds { 14671da177e4SLinus Torvalds struct sighand_struct *sig; 14681da177e4SLinus Torvalds 146960348802SZhaolei if (clone_flags & CLONE_SIGHAND) { 14701da177e4SLinus Torvalds atomic_inc(¤t->sighand->count); 14711da177e4SLinus Torvalds return 0; 14721da177e4SLinus Torvalds } 14731da177e4SLinus Torvalds sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); 1474e56d0903SIngo Molnar rcu_assign_pointer(tsk->sighand, sig); 14751da177e4SLinus Torvalds if (!sig) 14761da177e4SLinus Torvalds return -ENOMEM; 14779d7fb042SPeter Zijlstra 14781da177e4SLinus Torvalds atomic_set(&sig->count, 1); 147906e62a46SJann Horn spin_lock_irq(¤t->sighand->siglock); 14801da177e4SLinus Torvalds memcpy(sig->action, current->sighand->action, sizeof(sig->action)); 148106e62a46SJann Horn spin_unlock_irq(¤t->sighand->siglock); 14821da177e4SLinus Torvalds return 0; 14831da177e4SLinus Torvalds } 14841da177e4SLinus Torvalds 1485a7e5328aSOleg Nesterov void __cleanup_sighand(struct sighand_struct *sighand) 1486c81addc9SOleg Nesterov { 1487d80e731eSOleg Nesterov if (atomic_dec_and_test(&sighand->count)) { 1488d80e731eSOleg Nesterov signalfd_cleanup(sighand); 1489392809b2SOleg Nesterov /* 14905f0d5a3aSPaul E. McKenney * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it 1491392809b2SOleg Nesterov * without an RCU grace period, see __lock_task_sighand(). 1492392809b2SOleg Nesterov */ 1493c81addc9SOleg Nesterov kmem_cache_free(sighand_cachep, sighand); 1494c81addc9SOleg Nesterov } 1495d80e731eSOleg Nesterov } 1496c81addc9SOleg Nesterov 1497b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 1498f06febc9SFrank Mayhar /* 1499f06febc9SFrank Mayhar * Initialize POSIX timer handling for a thread group. 1500f06febc9SFrank Mayhar */ 1501f06febc9SFrank Mayhar static void posix_cpu_timers_init_group(struct signal_struct *sig) 1502f06febc9SFrank Mayhar { 150378d7d407SJiri Slaby unsigned long cpu_limit; 150478d7d407SJiri Slaby 1505316c1608SJason Low cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); 150678d7d407SJiri Slaby if (cpu_limit != RLIM_INFINITY) { 1507ebd7e7fcSFrederic Weisbecker sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC; 1508d5c373ebSJason Low sig->cputimer.running = true; 15096279a751SOleg Nesterov } 15106279a751SOleg Nesterov 1511f06febc9SFrank Mayhar /* The timer lists. */ 1512f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[0]); 1513f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[1]); 1514f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[2]); 1515f06febc9SFrank Mayhar } 1516b18b6a9cSNicolas Pitre #else 1517b18b6a9cSNicolas Pitre static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { } 1518b18b6a9cSNicolas Pitre #endif 1519f06febc9SFrank Mayhar 1520a39bc516SAlexey Dobriyan static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) 15211da177e4SLinus Torvalds { 15221da177e4SLinus Torvalds struct signal_struct *sig; 15231da177e4SLinus Torvalds 15244ab6c083SOleg Nesterov if (clone_flags & CLONE_THREAD) 1525490dea45SPeter Zijlstra return 0; 15266279a751SOleg Nesterov 1527a56704efSVeaceslav Falico sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); 15281da177e4SLinus Torvalds tsk->signal = sig; 15291da177e4SLinus Torvalds if (!sig) 15301da177e4SLinus Torvalds return -ENOMEM; 15311da177e4SLinus Torvalds 1532b3ac022cSOleg Nesterov sig->nr_threads = 1; 15331da177e4SLinus Torvalds atomic_set(&sig->live, 1); 1534b3ac022cSOleg Nesterov atomic_set(&sig->sigcnt, 1); 15350c740d0aSOleg Nesterov 15360c740d0aSOleg Nesterov /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ 15370c740d0aSOleg Nesterov sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); 15380c740d0aSOleg Nesterov tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); 15390c740d0aSOleg Nesterov 15401da177e4SLinus Torvalds init_waitqueue_head(&sig->wait_chldexit); 1541db51aeccSOleg Nesterov sig->curr_target = tsk; 15421da177e4SLinus Torvalds init_sigpending(&sig->shared_pending); 1543c3ad2c3bSEric W. Biederman INIT_HLIST_HEAD(&sig->multiprocess); 1544e78c3496SRik van Riel seqlock_init(&sig->stats_lock); 15459d7fb042SPeter Zijlstra prev_cputime_init(&sig->prev_cputime); 15461da177e4SLinus Torvalds 1547baa73d9eSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 1548b18b6a9cSNicolas Pitre INIT_LIST_HEAD(&sig->posix_timers); 1549c9cb2e3dSThomas Gleixner hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 15501da177e4SLinus Torvalds sig->real_timer.function = it_real_fn; 1551baa73d9eSNicolas Pitre #endif 15521da177e4SLinus Torvalds 15531da177e4SLinus Torvalds task_lock(current->group_leader); 15541da177e4SLinus Torvalds memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); 15551da177e4SLinus Torvalds task_unlock(current->group_leader); 15561da177e4SLinus Torvalds 15576279a751SOleg Nesterov posix_cpu_timers_init_group(sig); 15586279a751SOleg Nesterov 1559522ed776SMiloslav Trmac tty_audit_fork(sig); 15605091faa4SMike Galbraith sched_autogroup_fork(sig); 1561522ed776SMiloslav Trmac 1562a63d83f4SDavid Rientjes sig->oom_score_adj = current->signal->oom_score_adj; 1563dabb16f6SMandeep Singh Baines sig->oom_score_adj_min = current->signal->oom_score_adj_min; 156428b83c51SKOSAKI Motohiro 15659b1bf12dSKOSAKI Motohiro mutex_init(&sig->cred_guard_mutex); 15669b1bf12dSKOSAKI Motohiro 15671da177e4SLinus Torvalds return 0; 15681da177e4SLinus Torvalds } 15691da177e4SLinus Torvalds 1570dbd95212SKees Cook static void copy_seccomp(struct task_struct *p) 1571dbd95212SKees Cook { 1572dbd95212SKees Cook #ifdef CONFIG_SECCOMP 1573dbd95212SKees Cook /* 1574dbd95212SKees Cook * Must be called with sighand->lock held, which is common to 1575dbd95212SKees Cook * all threads in the group. Holding cred_guard_mutex is not 1576dbd95212SKees Cook * needed because this new task is not yet running and cannot 1577dbd95212SKees Cook * be racing exec. 1578dbd95212SKees Cook */ 157969f6a34bSGuenter Roeck assert_spin_locked(¤t->sighand->siglock); 1580dbd95212SKees Cook 1581dbd95212SKees Cook /* Ref-count the new filter user, and assign it. */ 1582dbd95212SKees Cook get_seccomp_filter(current); 1583dbd95212SKees Cook p->seccomp = current->seccomp; 1584dbd95212SKees Cook 1585dbd95212SKees Cook /* 1586dbd95212SKees Cook * Explicitly enable no_new_privs here in case it got set 1587dbd95212SKees Cook * between the task_struct being duplicated and holding the 1588dbd95212SKees Cook * sighand lock. The seccomp state and nnp must be in sync. 1589dbd95212SKees Cook */ 1590dbd95212SKees Cook if (task_no_new_privs(current)) 1591dbd95212SKees Cook task_set_no_new_privs(p); 1592dbd95212SKees Cook 1593dbd95212SKees Cook /* 1594dbd95212SKees Cook * If the parent gained a seccomp mode after copying thread 1595dbd95212SKees Cook * flags and between before we held the sighand lock, we have 1596dbd95212SKees Cook * to manually enable the seccomp thread flag here. 1597dbd95212SKees Cook */ 1598dbd95212SKees Cook if (p->seccomp.mode != SECCOMP_MODE_DISABLED) 1599dbd95212SKees Cook set_tsk_thread_flag(p, TIF_SECCOMP); 1600dbd95212SKees Cook #endif 1601dbd95212SKees Cook } 1602dbd95212SKees Cook 160317da2bd9SHeiko Carstens SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) 16041da177e4SLinus Torvalds { 16051da177e4SLinus Torvalds current->clear_child_tid = tidptr; 16061da177e4SLinus Torvalds 1607b488893aSPavel Emelyanov return task_pid_vnr(current); 16081da177e4SLinus Torvalds } 16091da177e4SLinus Torvalds 1610a39bc516SAlexey Dobriyan static void rt_mutex_init_task(struct task_struct *p) 161123f78d4aSIngo Molnar { 16121d615482SThomas Gleixner raw_spin_lock_init(&p->pi_lock); 1613e29e175bSZilvinas Valinskas #ifdef CONFIG_RT_MUTEXES 1614a23ba907SDavidlohr Bueso p->pi_waiters = RB_ROOT_CACHED; 1615e96a7705SXunlei Pang p->pi_top_task = NULL; 161623f78d4aSIngo Molnar p->pi_blocked_on = NULL; 161723f78d4aSIngo Molnar #endif 161823f78d4aSIngo Molnar } 161923f78d4aSIngo Molnar 1620b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 16211da177e4SLinus Torvalds /* 1622f06febc9SFrank Mayhar * Initialize POSIX timer handling for a single task. 1623f06febc9SFrank Mayhar */ 1624f06febc9SFrank Mayhar static void posix_cpu_timers_init(struct task_struct *tsk) 1625f06febc9SFrank Mayhar { 162664861634SMartin Schwidefsky tsk->cputime_expires.prof_exp = 0; 162764861634SMartin Schwidefsky tsk->cputime_expires.virt_exp = 0; 1628f06febc9SFrank Mayhar tsk->cputime_expires.sched_exp = 0; 1629f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[0]); 1630f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[1]); 1631f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[2]); 1632f06febc9SFrank Mayhar } 1633b18b6a9cSNicolas Pitre #else 1634b18b6a9cSNicolas Pitre static inline void posix_cpu_timers_init(struct task_struct *tsk) { } 1635b18b6a9cSNicolas Pitre #endif 1636f06febc9SFrank Mayhar 16372c470475SEric W. Biederman static inline void init_task_pid_links(struct task_struct *task) 16382c470475SEric W. Biederman { 16392c470475SEric W. Biederman enum pid_type type; 16402c470475SEric W. Biederman 16412c470475SEric W. Biederman for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { 16422c470475SEric W. Biederman INIT_HLIST_NODE(&task->pid_links[type]); 16432c470475SEric W. Biederman } 16442c470475SEric W. Biederman } 16452c470475SEric W. Biederman 164681907739SOleg Nesterov static inline void 164781907739SOleg Nesterov init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) 164881907739SOleg Nesterov { 16492c470475SEric W. Biederman if (type == PIDTYPE_PID) 16502c470475SEric W. Biederman task->thread_pid = pid; 16512c470475SEric W. Biederman else 16522c470475SEric W. Biederman task->signal->pids[type] = pid; 165381907739SOleg Nesterov } 165481907739SOleg Nesterov 16556bfbaa51SIngo Molnar static inline void rcu_copy_process(struct task_struct *p) 16566bfbaa51SIngo Molnar { 16576bfbaa51SIngo Molnar #ifdef CONFIG_PREEMPT_RCU 16586bfbaa51SIngo Molnar p->rcu_read_lock_nesting = 0; 16596bfbaa51SIngo Molnar p->rcu_read_unlock_special.s = 0; 16606bfbaa51SIngo Molnar p->rcu_blocked_node = NULL; 16616bfbaa51SIngo Molnar INIT_LIST_HEAD(&p->rcu_node_entry); 16626bfbaa51SIngo Molnar #endif /* #ifdef CONFIG_PREEMPT_RCU */ 16636bfbaa51SIngo Molnar #ifdef CONFIG_TASKS_RCU 16646bfbaa51SIngo Molnar p->rcu_tasks_holdout = false; 16656bfbaa51SIngo Molnar INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); 16666bfbaa51SIngo Molnar p->rcu_tasks_idle_cpu = -1; 16676bfbaa51SIngo Molnar #endif /* #ifdef CONFIG_TASKS_RCU */ 16686bfbaa51SIngo Molnar } 16696bfbaa51SIngo Molnar 1670f06febc9SFrank Mayhar /* 16711da177e4SLinus Torvalds * This creates a new process as a copy of the old one, 16721da177e4SLinus Torvalds * but does not actually start it yet. 16731da177e4SLinus Torvalds * 16741da177e4SLinus Torvalds * It copies the registers, and all the appropriate 16751da177e4SLinus Torvalds * parts of the process environment (as per the clone 16761da177e4SLinus Torvalds * flags). The actual kick-off is left to the caller. 16771da177e4SLinus Torvalds */ 16780766f788SEmese Revfy static __latent_entropy struct task_struct *copy_process( 16790766f788SEmese Revfy unsigned long clone_flags, 16801da177e4SLinus Torvalds unsigned long stack_start, 16811da177e4SLinus Torvalds unsigned long stack_size, 16821da177e4SLinus Torvalds int __user *child_tidptr, 168309a05394SRoland McGrath struct pid *pid, 16843033f14aSJosh Triplett int trace, 1685725fc629SAndi Kleen unsigned long tls, 1686725fc629SAndi Kleen int node) 16871da177e4SLinus Torvalds { 16881da177e4SLinus Torvalds int retval; 1689a24efe62SMariusz Kozlowski struct task_struct *p; 1690c3ad2c3bSEric W. Biederman struct multiprocess_signals delayed; 16911da177e4SLinus Torvalds 1692667b6094SMarcos Paulo de Souza /* 1693667b6094SMarcos Paulo de Souza * Don't allow sharing the root directory with processes in a different 1694667b6094SMarcos Paulo de Souza * namespace 1695667b6094SMarcos Paulo de Souza */ 16961da177e4SLinus Torvalds if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) 16971da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 16981da177e4SLinus Torvalds 1699e66eded8SEric W. Biederman if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) 1700e66eded8SEric W. Biederman return ERR_PTR(-EINVAL); 1701e66eded8SEric W. Biederman 17021da177e4SLinus Torvalds /* 17031da177e4SLinus Torvalds * Thread groups must share signals as well, and detached threads 17041da177e4SLinus Torvalds * can only be started up within the thread group. 17051da177e4SLinus Torvalds */ 17061da177e4SLinus Torvalds if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) 17071da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 17081da177e4SLinus Torvalds 17091da177e4SLinus Torvalds /* 17101da177e4SLinus Torvalds * Shared signal handlers imply shared VM. By way of the above, 17111da177e4SLinus Torvalds * thread groups also imply shared VM. Blocking this case allows 17121da177e4SLinus Torvalds * for various simplifications in other code. 17131da177e4SLinus Torvalds */ 17141da177e4SLinus Torvalds if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) 17151da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 17161da177e4SLinus Torvalds 1717123be07bSSukadev Bhattiprolu /* 1718123be07bSSukadev Bhattiprolu * Siblings of global init remain as zombies on exit since they are 1719123be07bSSukadev Bhattiprolu * not reaped by their parent (swapper). To solve this and to avoid 1720123be07bSSukadev Bhattiprolu * multi-rooted process trees, prevent global and container-inits 1721123be07bSSukadev Bhattiprolu * from creating siblings. 1722123be07bSSukadev Bhattiprolu */ 1723123be07bSSukadev Bhattiprolu if ((clone_flags & CLONE_PARENT) && 1724123be07bSSukadev Bhattiprolu current->signal->flags & SIGNAL_UNKILLABLE) 1725123be07bSSukadev Bhattiprolu return ERR_PTR(-EINVAL); 1726123be07bSSukadev Bhattiprolu 17278382fcacSEric W. Biederman /* 172840a0d32dSOleg Nesterov * If the new process will be in a different pid or user namespace 1729faf00da5SEric W. Biederman * do not allow it to share a thread group with the forking task. 17308382fcacSEric W. Biederman */ 1731faf00da5SEric W. Biederman if (clone_flags & CLONE_THREAD) { 173240a0d32dSOleg Nesterov if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || 173340a0d32dSOleg Nesterov (task_active_pid_ns(current) != 1734c2b1df2eSAndy Lutomirski current->nsproxy->pid_ns_for_children)) 17358382fcacSEric W. Biederman return ERR_PTR(-EINVAL); 173640a0d32dSOleg Nesterov } 17378382fcacSEric W. Biederman 1738c3ad2c3bSEric W. Biederman /* 1739c3ad2c3bSEric W. Biederman * Force any signals received before this point to be delivered 1740c3ad2c3bSEric W. Biederman * before the fork happens. Collect up signals sent to multiple 1741c3ad2c3bSEric W. Biederman * processes that happen during the fork and delay them so that 1742c3ad2c3bSEric W. Biederman * they appear to happen after the fork. 1743c3ad2c3bSEric W. Biederman */ 1744c3ad2c3bSEric W. Biederman sigemptyset(&delayed.signal); 1745c3ad2c3bSEric W. Biederman INIT_HLIST_NODE(&delayed.node); 1746c3ad2c3bSEric W. Biederman 1747c3ad2c3bSEric W. Biederman spin_lock_irq(¤t->sighand->siglock); 1748c3ad2c3bSEric W. Biederman if (!(clone_flags & CLONE_THREAD)) 1749c3ad2c3bSEric W. Biederman hlist_add_head(&delayed.node, ¤t->signal->multiprocess); 1750c3ad2c3bSEric W. Biederman recalc_sigpending(); 1751c3ad2c3bSEric W. Biederman spin_unlock_irq(¤t->sighand->siglock); 1752c3ad2c3bSEric W. Biederman retval = -ERESTARTNOINTR; 1753c3ad2c3bSEric W. Biederman if (signal_pending(current)) 1754c3ad2c3bSEric W. Biederman goto fork_out; 1755c3ad2c3bSEric W. Biederman 17561da177e4SLinus Torvalds retval = -ENOMEM; 1757725fc629SAndi Kleen p = dup_task_struct(current, node); 17581da177e4SLinus Torvalds if (!p) 17591da177e4SLinus Torvalds goto fork_out; 17601da177e4SLinus Torvalds 17614d6501dcSVegard Nossum /* 17624d6501dcSVegard Nossum * This _must_ happen before we call free_task(), i.e. before we jump 17634d6501dcSVegard Nossum * to any of the bad_fork_* labels. This is to avoid freeing 17644d6501dcSVegard Nossum * p->set_child_tid which is (ab)used as a kthread's data pointer for 17654d6501dcSVegard Nossum * kernel threads (PF_KTHREAD). 17664d6501dcSVegard Nossum */ 17674d6501dcSVegard Nossum p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; 17684d6501dcSVegard Nossum /* 17694d6501dcSVegard Nossum * Clear TID on mm_release()? 17704d6501dcSVegard Nossum */ 17714d6501dcSVegard Nossum p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; 17724d6501dcSVegard Nossum 1773f7e8b616SSteven Rostedt ftrace_graph_init_task(p); 1774f7e8b616SSteven Rostedt 1775bea493a0SPeter Zijlstra rt_mutex_init_task(p); 1776bea493a0SPeter Zijlstra 1777d12c1a37SIngo Molnar #ifdef CONFIG_PROVE_LOCKING 1778de30a2b3SIngo Molnar DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); 1779de30a2b3SIngo Molnar DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 1780de30a2b3SIngo Molnar #endif 17811da177e4SLinus Torvalds retval = -EAGAIN; 17823b11a1deSDavid Howells if (atomic_read(&p->real_cred->user->processes) >= 178378d7d407SJiri Slaby task_rlimit(p, RLIMIT_NPROC)) { 1784b57922b6SEric Paris if (p->real_cred->user != INIT_USER && 1785b57922b6SEric Paris !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) 17861da177e4SLinus Torvalds goto bad_fork_free; 17871da177e4SLinus Torvalds } 178872fa5997SVasiliy Kulikov current->flags &= ~PF_NPROC_EXCEEDED; 17891da177e4SLinus Torvalds 1790f1752eecSDavid Howells retval = copy_creds(p, clone_flags); 1791f1752eecSDavid Howells if (retval < 0) 1792f1752eecSDavid Howells goto bad_fork_free; 17931da177e4SLinus Torvalds 17941da177e4SLinus Torvalds /* 17951da177e4SLinus Torvalds * If multiple threads are within copy_process(), then this check 17961da177e4SLinus Torvalds * triggers too late. This doesn't hurt, the check is only there 17971da177e4SLinus Torvalds * to stop root fork bombs. 17981da177e4SLinus Torvalds */ 179904ec93feSLi Zefan retval = -EAGAIN; 18001da177e4SLinus Torvalds if (nr_threads >= max_threads) 18011da177e4SLinus Torvalds goto bad_fork_cleanup_count; 18021da177e4SLinus Torvalds 1803ca74e92bSShailabh Nagar delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 1804c1de45caSPeter Zijlstra p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE); 1805514ddb44SDavid Rientjes p->flags |= PF_FORKNOEXEC; 18061da177e4SLinus Torvalds INIT_LIST_HEAD(&p->children); 18071da177e4SLinus Torvalds INIT_LIST_HEAD(&p->sibling); 1808f41d911fSPaul E. McKenney rcu_copy_process(p); 18091da177e4SLinus Torvalds p->vfork_done = NULL; 18101da177e4SLinus Torvalds spin_lock_init(&p->alloc_lock); 18111da177e4SLinus Torvalds 18121da177e4SLinus Torvalds init_sigpending(&p->pending); 18131da177e4SLinus Torvalds 181464861634SMartin Schwidefsky p->utime = p->stime = p->gtime = 0; 181540565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 181664861634SMartin Schwidefsky p->utimescaled = p->stimescaled = 0; 181740565b5aSStanislaw Gruszka #endif 18189d7fb042SPeter Zijlstra prev_cputime_init(&p->prev_cputime); 18199d7fb042SPeter Zijlstra 18206a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1821bac5b6b6SFrederic Weisbecker seqcount_init(&p->vtime.seqcount); 1822bac5b6b6SFrederic Weisbecker p->vtime.starttime = 0; 1823bac5b6b6SFrederic Weisbecker p->vtime.state = VTIME_INACTIVE; 18246a61671bSFrederic Weisbecker #endif 18256a61671bSFrederic Weisbecker 1826a3a2e76cSKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING) 1827a3a2e76cSKAMEZAWA Hiroyuki memset(&p->rss_stat, 0, sizeof(p->rss_stat)); 1828a3a2e76cSKAMEZAWA Hiroyuki #endif 1829172ba844SBalbir Singh 18306976675dSArjan van de Ven p->default_timer_slack_ns = current->timer_slack_ns; 18316976675dSArjan van de Ven 1832eb414681SJohannes Weiner #ifdef CONFIG_PSI 1833eb414681SJohannes Weiner p->psi_flags = 0; 1834eb414681SJohannes Weiner #endif 1835eb414681SJohannes Weiner 18365995477aSAndrea Righi task_io_accounting_init(&p->ioac); 18371da177e4SLinus Torvalds acct_clear_integrals(p); 18381da177e4SLinus Torvalds 1839f06febc9SFrank Mayhar posix_cpu_timers_init(p); 18401da177e4SLinus Torvalds 1841ccbf62d8SThomas Gleixner p->start_time = ktime_get_ns(); 184257e0be04SThomas Gleixner p->real_start_time = ktime_get_boot_ns(); 18431da177e4SLinus Torvalds p->io_context = NULL; 1844c0b0ae8aSRichard Guy Briggs audit_set_context(p, NULL); 1845b4f48b63SPaul Menage cgroup_fork(p); 18461da177e4SLinus Torvalds #ifdef CONFIG_NUMA 1847846a16bfSLee Schermerhorn p->mempolicy = mpol_dup(p->mempolicy); 18481da177e4SLinus Torvalds if (IS_ERR(p->mempolicy)) { 18491da177e4SLinus Torvalds retval = PTR_ERR(p->mempolicy); 18501da177e4SLinus Torvalds p->mempolicy = NULL; 1851e8604cb4SLi Zefan goto bad_fork_cleanup_threadgroup_lock; 18521da177e4SLinus Torvalds } 18531da177e4SLinus Torvalds #endif 1854778d3b0fSMichal Hocko #ifdef CONFIG_CPUSETS 1855778d3b0fSMichal Hocko p->cpuset_mem_spread_rotor = NUMA_NO_NODE; 1856778d3b0fSMichal Hocko p->cpuset_slab_spread_rotor = NUMA_NO_NODE; 1857cc9a6c87SMel Gorman seqcount_init(&p->mems_allowed_seq); 1858778d3b0fSMichal Hocko #endif 1859de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS 1860de30a2b3SIngo Molnar p->irq_events = 0; 1861de30a2b3SIngo Molnar p->hardirqs_enabled = 0; 1862de30a2b3SIngo Molnar p->hardirq_enable_ip = 0; 1863de30a2b3SIngo Molnar p->hardirq_enable_event = 0; 1864de30a2b3SIngo Molnar p->hardirq_disable_ip = _THIS_IP_; 1865de30a2b3SIngo Molnar p->hardirq_disable_event = 0; 1866de30a2b3SIngo Molnar p->softirqs_enabled = 1; 1867de30a2b3SIngo Molnar p->softirq_enable_ip = _THIS_IP_; 1868de30a2b3SIngo Molnar p->softirq_enable_event = 0; 1869de30a2b3SIngo Molnar p->softirq_disable_ip = 0; 1870de30a2b3SIngo Molnar p->softirq_disable_event = 0; 1871de30a2b3SIngo Molnar p->hardirq_context = 0; 1872de30a2b3SIngo Molnar p->softirq_context = 0; 1873de30a2b3SIngo Molnar #endif 18748bcbde54SDavid Hildenbrand 18758bcbde54SDavid Hildenbrand p->pagefault_disabled = 0; 18768bcbde54SDavid Hildenbrand 1877fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP 1878fbb9ce95SIngo Molnar p->lockdep_depth = 0; /* no locks held yet */ 1879fbb9ce95SIngo Molnar p->curr_chain_key = 0; 1880fbb9ce95SIngo Molnar p->lockdep_recursion = 0; 1881b09be676SByungchul Park lockdep_init_task(p); 1882fbb9ce95SIngo Molnar #endif 18831da177e4SLinus Torvalds 1884408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES 1885408894eeSIngo Molnar p->blocked_on = NULL; /* not blocked yet */ 1886408894eeSIngo Molnar #endif 1887cafe5635SKent Overstreet #ifdef CONFIG_BCACHE 1888cafe5635SKent Overstreet p->sequential_io = 0; 1889cafe5635SKent Overstreet p->sequential_io_avg = 0; 1890cafe5635SKent Overstreet #endif 18910f481406SMarkus Metzger 18923c90e6e9SSrivatsa Vaddagiri /* Perform scheduler related setup. Assign this task to a CPU. */ 1893aab03e05SDario Faggioli retval = sched_fork(clone_flags, p); 1894aab03e05SDario Faggioli if (retval) 1895aab03e05SDario Faggioli goto bad_fork_cleanup_policy; 18966ab423e0SPeter Zijlstra 1897cdd6c482SIngo Molnar retval = perf_event_init_task(p); 18986ab423e0SPeter Zijlstra if (retval) 18996ab423e0SPeter Zijlstra goto bad_fork_cleanup_policy; 1900fb0a685cSDaniel Rebelo de Oliveira retval = audit_alloc(p); 1901fb0a685cSDaniel Rebelo de Oliveira if (retval) 19026c72e350SPeter Zijlstra goto bad_fork_cleanup_perf; 19031da177e4SLinus Torvalds /* copy all the process information */ 1904ab602f79SJack Miller shm_init_task(p); 1905e4e55b47STetsuo Handa retval = security_task_alloc(p, clone_flags); 1906fb0a685cSDaniel Rebelo de Oliveira if (retval) 19071da177e4SLinus Torvalds goto bad_fork_cleanup_audit; 1908e4e55b47STetsuo Handa retval = copy_semundo(clone_flags, p); 1909e4e55b47STetsuo Handa if (retval) 1910e4e55b47STetsuo Handa goto bad_fork_cleanup_security; 1911fb0a685cSDaniel Rebelo de Oliveira retval = copy_files(clone_flags, p); 1912fb0a685cSDaniel Rebelo de Oliveira if (retval) 19131da177e4SLinus Torvalds goto bad_fork_cleanup_semundo; 1914fb0a685cSDaniel Rebelo de Oliveira retval = copy_fs(clone_flags, p); 1915fb0a685cSDaniel Rebelo de Oliveira if (retval) 19161da177e4SLinus Torvalds goto bad_fork_cleanup_files; 1917fb0a685cSDaniel Rebelo de Oliveira retval = copy_sighand(clone_flags, p); 1918fb0a685cSDaniel Rebelo de Oliveira if (retval) 19191da177e4SLinus Torvalds goto bad_fork_cleanup_fs; 1920fb0a685cSDaniel Rebelo de Oliveira retval = copy_signal(clone_flags, p); 1921fb0a685cSDaniel Rebelo de Oliveira if (retval) 19221da177e4SLinus Torvalds goto bad_fork_cleanup_sighand; 1923fb0a685cSDaniel Rebelo de Oliveira retval = copy_mm(clone_flags, p); 1924fb0a685cSDaniel Rebelo de Oliveira if (retval) 19251da177e4SLinus Torvalds goto bad_fork_cleanup_signal; 1926fb0a685cSDaniel Rebelo de Oliveira retval = copy_namespaces(clone_flags, p); 1927fb0a685cSDaniel Rebelo de Oliveira if (retval) 1928d84f4f99SDavid Howells goto bad_fork_cleanup_mm; 1929fb0a685cSDaniel Rebelo de Oliveira retval = copy_io(clone_flags, p); 1930fb0a685cSDaniel Rebelo de Oliveira if (retval) 1931fd0928dfSJens Axboe goto bad_fork_cleanup_namespaces; 19323033f14aSJosh Triplett retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls); 19331da177e4SLinus Torvalds if (retval) 1934fd0928dfSJens Axboe goto bad_fork_cleanup_io; 19351da177e4SLinus Torvalds 1936afaef01cSAlexander Popov stackleak_task_init(p); 1937afaef01cSAlexander Popov 1938425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) { 1939c2b1df2eSAndy Lutomirski pid = alloc_pid(p->nsproxy->pid_ns_for_children); 194035f71bc0SMichal Hocko if (IS_ERR(pid)) { 194135f71bc0SMichal Hocko retval = PTR_ERR(pid); 19420740aa5fSJiri Slaby goto bad_fork_cleanup_thread; 1943425fb2b4SPavel Emelyanov } 194435f71bc0SMichal Hocko } 1945425fb2b4SPavel Emelyanov 194673c10101SJens Axboe #ifdef CONFIG_BLOCK 194773c10101SJens Axboe p->plug = NULL; 194873c10101SJens Axboe #endif 194942b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX 19508f17d3a5SIngo Molnar p->robust_list = NULL; 19518f17d3a5SIngo Molnar #ifdef CONFIG_COMPAT 19528f17d3a5SIngo Molnar p->compat_robust_list = NULL; 19538f17d3a5SIngo Molnar #endif 1954c87e2837SIngo Molnar INIT_LIST_HEAD(&p->pi_state_list); 1955c87e2837SIngo Molnar p->pi_state_cache = NULL; 195642b2dd0aSAlexey Dobriyan #endif 19571da177e4SLinus Torvalds /* 1958f9a3879aSGOTO Masanori * sigaltstack should be cleared when sharing the same VM 1959f9a3879aSGOTO Masanori */ 1960f9a3879aSGOTO Masanori if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) 19612a742138SStas Sergeev sas_ss_reset(p); 1962f9a3879aSGOTO Masanori 1963f9a3879aSGOTO Masanori /* 19646580807dSOleg Nesterov * Syscall tracing and stepping should be turned off in the 19656580807dSOleg Nesterov * child regardless of CLONE_PTRACE. 19661da177e4SLinus Torvalds */ 19676580807dSOleg Nesterov user_disable_single_step(p); 19681da177e4SLinus Torvalds clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); 1969ed75e8d5SLaurent Vivier #ifdef TIF_SYSCALL_EMU 1970ed75e8d5SLaurent Vivier clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); 1971ed75e8d5SLaurent Vivier #endif 19729745512cSArjan van de Ven clear_all_latency_tracing(p); 19731da177e4SLinus Torvalds 19741da177e4SLinus Torvalds /* ok, now we should be set up.. */ 197518c830dfSOleg Nesterov p->pid = pid_nr(pid); 197618c830dfSOleg Nesterov if (clone_flags & CLONE_THREAD) { 19775f8aadd8SOleg Nesterov p->exit_signal = -1; 197818c830dfSOleg Nesterov p->group_leader = current->group_leader; 197918c830dfSOleg Nesterov p->tgid = current->tgid; 198018c830dfSOleg Nesterov } else { 198118c830dfSOleg Nesterov if (clone_flags & CLONE_PARENT) 19825f8aadd8SOleg Nesterov p->exit_signal = current->group_leader->exit_signal; 19835f8aadd8SOleg Nesterov else 19845f8aadd8SOleg Nesterov p->exit_signal = (clone_flags & CSIGNAL); 198518c830dfSOleg Nesterov p->group_leader = p; 198618c830dfSOleg Nesterov p->tgid = p->pid; 198718c830dfSOleg Nesterov } 19885f8aadd8SOleg Nesterov 19899d823e8fSWu Fengguang p->nr_dirtied = 0; 19909d823e8fSWu Fengguang p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); 199183712358SWu Fengguang p->dirty_paused_when = 0; 19929d823e8fSWu Fengguang 1993bb8cbbfeSOleg Nesterov p->pdeath_signal = 0; 199447e65328SOleg Nesterov INIT_LIST_HEAD(&p->thread_group); 1995158e1645SAl Viro p->task_works = NULL; 19961da177e4SLinus Torvalds 1997780de9ddSIngo Molnar cgroup_threadgroup_change_begin(current); 199818c830dfSOleg Nesterov /* 19997e47682eSAleksa Sarai * Ensure that the cgroup subsystem policies allow the new process to be 20007e47682eSAleksa Sarai * forked. It should be noted the the new process's css_set can be changed 20017e47682eSAleksa Sarai * between here and cgroup_post_fork() if an organisation operation is in 20027e47682eSAleksa Sarai * progress. 20037e47682eSAleksa Sarai */ 2004b53202e6SOleg Nesterov retval = cgroup_can_fork(p); 20057e47682eSAleksa Sarai if (retval) 20067e47682eSAleksa Sarai goto bad_fork_free_pid; 20077e47682eSAleksa Sarai 20087e47682eSAleksa Sarai /* 200918c830dfSOleg Nesterov * Make it visible to the rest of the system, but dont wake it up yet. 201018c830dfSOleg Nesterov * Need tasklist lock for parent etc handling! 201118c830dfSOleg Nesterov */ 20121da177e4SLinus Torvalds write_lock_irq(&tasklist_lock); 20131da177e4SLinus Torvalds 20141da177e4SLinus Torvalds /* CLONE_PARENT re-uses the old parent */ 20152d5516cbSOleg Nesterov if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { 20161da177e4SLinus Torvalds p->real_parent = current->real_parent; 20172d5516cbSOleg Nesterov p->parent_exec_id = current->parent_exec_id; 20182d5516cbSOleg Nesterov } else { 20191da177e4SLinus Torvalds p->real_parent = current; 20202d5516cbSOleg Nesterov p->parent_exec_id = current->self_exec_id; 20212d5516cbSOleg Nesterov } 20221da177e4SLinus Torvalds 2023d83a7cb3SJosh Poimboeuf klp_copy_process(p); 2024d83a7cb3SJosh Poimboeuf 20251da177e4SLinus Torvalds spin_lock(¤t->sighand->siglock); 20264a2c7a78SOleg Nesterov 20274a2c7a78SOleg Nesterov /* 2028dbd95212SKees Cook * Copy seccomp details explicitly here, in case they were changed 2029dbd95212SKees Cook * before holding sighand lock. 2030dbd95212SKees Cook */ 2031dbd95212SKees Cook copy_seccomp(p); 2032dbd95212SKees Cook 2033d7822b1eSMathieu Desnoyers rseq_fork(p, clone_flags); 2034d7822b1eSMathieu Desnoyers 20354ca1d3eeSEric W. Biederman /* Don't start children in a dying pid namespace */ 2036e8cfbc24SGargi Sharma if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { 20373fd37226SKirill Tkhai retval = -ENOMEM; 20383fd37226SKirill Tkhai goto bad_fork_cancel_cgroup; 20393fd37226SKirill Tkhai } 20404a2c7a78SOleg Nesterov 20417673bf55SEric W. Biederman /* Let kill terminate clone/fork in the middle */ 20427673bf55SEric W. Biederman if (fatal_signal_pending(current)) { 20437673bf55SEric W. Biederman retval = -EINTR; 20447673bf55SEric W. Biederman goto bad_fork_cancel_cgroup; 20457673bf55SEric W. Biederman } 20467673bf55SEric W. Biederman 20471da177e4SLinus Torvalds 20482c470475SEric W. Biederman init_task_pid_links(p); 204973b9ebfeSOleg Nesterov if (likely(p->pid)) { 20504b9d33e6STejun Heo ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); 20511da177e4SLinus Torvalds 205281907739SOleg Nesterov init_task_pid(p, PIDTYPE_PID, pid); 20531da177e4SLinus Torvalds if (thread_group_leader(p)) { 20546883f81aSEric W. Biederman init_task_pid(p, PIDTYPE_TGID, pid); 205581907739SOleg Nesterov init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); 205681907739SOleg Nesterov init_task_pid(p, PIDTYPE_SID, task_session(current)); 205781907739SOleg Nesterov 20581c4042c2SEric W. Biederman if (is_child_reaper(pid)) { 205917cf22c3SEric W. Biederman ns_of_pid(pid)->child_reaper = p; 20601c4042c2SEric W. Biederman p->signal->flags |= SIGNAL_UNKILLABLE; 20611c4042c2SEric W. Biederman } 2062c3ad2c3bSEric W. Biederman p->signal->shared_pending.signal = delayed.signal; 20639c9f4dedSAlan Cox p->signal->tty = tty_kref_get(current->signal->tty); 2064749860ceSPavel Tikhomirov /* 2065749860ceSPavel Tikhomirov * Inherit has_child_subreaper flag under the same 2066749860ceSPavel Tikhomirov * tasklist_lock with adding child to the process tree 2067749860ceSPavel Tikhomirov * for propagate_has_child_subreaper optimization. 2068749860ceSPavel Tikhomirov */ 2069749860ceSPavel Tikhomirov p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || 2070749860ceSPavel Tikhomirov p->real_parent->signal->is_child_subreaper; 20719cd80bbbSOleg Nesterov list_add_tail(&p->sibling, &p->real_parent->children); 20725e85d4abSEric W. Biederman list_add_tail_rcu(&p->tasks, &init_task.tasks); 20736883f81aSEric W. Biederman attach_pid(p, PIDTYPE_TGID); 207481907739SOleg Nesterov attach_pid(p, PIDTYPE_PGID); 207581907739SOleg Nesterov attach_pid(p, PIDTYPE_SID); 2076909ea964SChristoph Lameter __this_cpu_inc(process_counts); 207780628ca0SOleg Nesterov } else { 207880628ca0SOleg Nesterov current->signal->nr_threads++; 207980628ca0SOleg Nesterov atomic_inc(¤t->signal->live); 208080628ca0SOleg Nesterov atomic_inc(¤t->signal->sigcnt); 2081924de3b8SEric W. Biederman task_join_group_stop(p); 208280628ca0SOleg Nesterov list_add_tail_rcu(&p->thread_group, 208380628ca0SOleg Nesterov &p->group_leader->thread_group); 20840c740d0aSOleg Nesterov list_add_tail_rcu(&p->thread_node, 20850c740d0aSOleg Nesterov &p->signal->thread_head); 20861da177e4SLinus Torvalds } 208781907739SOleg Nesterov attach_pid(p, PIDTYPE_PID); 20881da177e4SLinus Torvalds nr_threads++; 208973b9ebfeSOleg Nesterov } 20901da177e4SLinus Torvalds total_forks++; 2091c3ad2c3bSEric W. Biederman hlist_del_init(&delayed.node); 20923f17da69SOleg Nesterov spin_unlock(¤t->sighand->siglock); 20934af4206bSOleg Nesterov syscall_tracepoint_update(p); 20941da177e4SLinus Torvalds write_unlock_irq(&tasklist_lock); 20954af4206bSOleg Nesterov 2096c13cf856SAndrew Morton proc_fork_connector(p); 2097b53202e6SOleg Nesterov cgroup_post_fork(p); 2098780de9ddSIngo Molnar cgroup_threadgroup_change_end(current); 2099cdd6c482SIngo Molnar perf_event_fork(p); 210043d2b113SKAMEZAWA Hiroyuki 210143d2b113SKAMEZAWA Hiroyuki trace_task_newtask(p, clone_flags); 21023ab67966SOleg Nesterov uprobe_copy_process(p, clone_flags); 210343d2b113SKAMEZAWA Hiroyuki 21041da177e4SLinus Torvalds return p; 21051da177e4SLinus Torvalds 21067e47682eSAleksa Sarai bad_fork_cancel_cgroup: 21073fd37226SKirill Tkhai spin_unlock(¤t->sighand->siglock); 21083fd37226SKirill Tkhai write_unlock_irq(&tasklist_lock); 2109b53202e6SOleg Nesterov cgroup_cancel_fork(p); 2110425fb2b4SPavel Emelyanov bad_fork_free_pid: 2111780de9ddSIngo Molnar cgroup_threadgroup_change_end(current); 2112425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) 2113425fb2b4SPavel Emelyanov free_pid(pid); 21140740aa5fSJiri Slaby bad_fork_cleanup_thread: 21150740aa5fSJiri Slaby exit_thread(p); 2116fd0928dfSJens Axboe bad_fork_cleanup_io: 2117b69f2292SLouis Rilling if (p->io_context) 2118b69f2292SLouis Rilling exit_io_context(p); 2119ab516013SSerge E. Hallyn bad_fork_cleanup_namespaces: 2120444f378bSLinus Torvalds exit_task_namespaces(p); 21211da177e4SLinus Torvalds bad_fork_cleanup_mm: 2122c9f01245SDavid Rientjes if (p->mm) 21231da177e4SLinus Torvalds mmput(p->mm); 21241da177e4SLinus Torvalds bad_fork_cleanup_signal: 21254ab6c083SOleg Nesterov if (!(clone_flags & CLONE_THREAD)) 21261c5354deSMike Galbraith free_signal_struct(p->signal); 21271da177e4SLinus Torvalds bad_fork_cleanup_sighand: 2128a7e5328aSOleg Nesterov __cleanup_sighand(p->sighand); 21291da177e4SLinus Torvalds bad_fork_cleanup_fs: 21301da177e4SLinus Torvalds exit_fs(p); /* blocking */ 21311da177e4SLinus Torvalds bad_fork_cleanup_files: 21321da177e4SLinus Torvalds exit_files(p); /* blocking */ 21331da177e4SLinus Torvalds bad_fork_cleanup_semundo: 21341da177e4SLinus Torvalds exit_sem(p); 2135e4e55b47STetsuo Handa bad_fork_cleanup_security: 2136e4e55b47STetsuo Handa security_task_free(p); 21371da177e4SLinus Torvalds bad_fork_cleanup_audit: 21381da177e4SLinus Torvalds audit_free(p); 21396c72e350SPeter Zijlstra bad_fork_cleanup_perf: 2140cdd6c482SIngo Molnar perf_event_free_task(p); 21416c72e350SPeter Zijlstra bad_fork_cleanup_policy: 2142b09be676SByungchul Park lockdep_free_task(p); 21431da177e4SLinus Torvalds #ifdef CONFIG_NUMA 2144f0be3d32SLee Schermerhorn mpol_put(p->mempolicy); 2145e8604cb4SLi Zefan bad_fork_cleanup_threadgroup_lock: 21461da177e4SLinus Torvalds #endif 214735df17c5SShailabh Nagar delayacct_tsk_free(p); 21481da177e4SLinus Torvalds bad_fork_cleanup_count: 2149d84f4f99SDavid Howells atomic_dec(&p->cred->user->processes); 2150e0e81739SDavid Howells exit_creds(p); 21511da177e4SLinus Torvalds bad_fork_free: 2152405c0759SAndy Lutomirski p->state = TASK_DEAD; 215368f24b08SAndy Lutomirski put_task_stack(p); 21541da177e4SLinus Torvalds free_task(p); 2155fe7d37d1SOleg Nesterov fork_out: 2156c3ad2c3bSEric W. Biederman spin_lock_irq(¤t->sighand->siglock); 2157c3ad2c3bSEric W. Biederman hlist_del_init(&delayed.node); 2158c3ad2c3bSEric W. Biederman spin_unlock_irq(¤t->sighand->siglock); 2159fe7d37d1SOleg Nesterov return ERR_PTR(retval); 21601da177e4SLinus Torvalds } 21611da177e4SLinus Torvalds 21622c470475SEric W. Biederman static inline void init_idle_pids(struct task_struct *idle) 2163f106eee1SOleg Nesterov { 2164f106eee1SOleg Nesterov enum pid_type type; 2165f106eee1SOleg Nesterov 2166f106eee1SOleg Nesterov for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { 21672c470475SEric W. Biederman INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */ 21682c470475SEric W. Biederman init_task_pid(idle, type, &init_struct_pid); 2169f106eee1SOleg Nesterov } 2170f106eee1SOleg Nesterov } 2171f106eee1SOleg Nesterov 21720db0628dSPaul Gortmaker struct task_struct *fork_idle(int cpu) 21731da177e4SLinus Torvalds { 217436c8b586SIngo Molnar struct task_struct *task; 2175725fc629SAndi Kleen task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0, 2176725fc629SAndi Kleen cpu_to_node(cpu)); 2177f106eee1SOleg Nesterov if (!IS_ERR(task)) { 21782c470475SEric W. Biederman init_idle_pids(task); 21791da177e4SLinus Torvalds init_idle(task, cpu); 2180f106eee1SOleg Nesterov } 218173b9ebfeSOleg Nesterov 21821da177e4SLinus Torvalds return task; 21831da177e4SLinus Torvalds } 21841da177e4SLinus Torvalds 21851da177e4SLinus Torvalds /* 21861da177e4SLinus Torvalds * Ok, this is the main fork-routine. 21871da177e4SLinus Torvalds * 21881da177e4SLinus Torvalds * It copies the process, and if successful kick-starts 21891da177e4SLinus Torvalds * it and waits for it to finish using the VM if required. 21901da177e4SLinus Torvalds */ 21913033f14aSJosh Triplett long _do_fork(unsigned long clone_flags, 21921da177e4SLinus Torvalds unsigned long stack_start, 21931da177e4SLinus Torvalds unsigned long stack_size, 21941da177e4SLinus Torvalds int __user *parent_tidptr, 21953033f14aSJosh Triplett int __user *child_tidptr, 21963033f14aSJosh Triplett unsigned long tls) 21971da177e4SLinus Torvalds { 21989f5325aaSMarcos Paulo de Souza struct completion vfork; 21999f5325aaSMarcos Paulo de Souza struct pid *pid; 22001da177e4SLinus Torvalds struct task_struct *p; 22011da177e4SLinus Torvalds int trace = 0; 220292476d7fSEric W. Biederman long nr; 22031da177e4SLinus Torvalds 2204bdff746aSAndrew Morton /* 22054b9d33e6STejun Heo * Determine whether and which event to report to ptracer. When 22064b9d33e6STejun Heo * called from kernel_thread or CLONE_UNTRACED is explicitly 22074b9d33e6STejun Heo * requested, no event is reported; otherwise, report if the event 22084b9d33e6STejun Heo * for the type of forking is enabled. 220909a05394SRoland McGrath */ 2210e80d6661SAl Viro if (!(clone_flags & CLONE_UNTRACED)) { 22114b9d33e6STejun Heo if (clone_flags & CLONE_VFORK) 22124b9d33e6STejun Heo trace = PTRACE_EVENT_VFORK; 22134b9d33e6STejun Heo else if ((clone_flags & CSIGNAL) != SIGCHLD) 22144b9d33e6STejun Heo trace = PTRACE_EVENT_CLONE; 22154b9d33e6STejun Heo else 22164b9d33e6STejun Heo trace = PTRACE_EVENT_FORK; 22174b9d33e6STejun Heo 22184b9d33e6STejun Heo if (likely(!ptrace_event_enabled(current, trace))) 22194b9d33e6STejun Heo trace = 0; 22204b9d33e6STejun Heo } 22211da177e4SLinus Torvalds 222262e791c1SAl Viro p = copy_process(clone_flags, stack_start, stack_size, 2223725fc629SAndi Kleen child_tidptr, NULL, trace, tls, NUMA_NO_NODE); 222438addce8SEmese Revfy add_latent_entropy(); 22259f5325aaSMarcos Paulo de Souza 22269f5325aaSMarcos Paulo de Souza if (IS_ERR(p)) 22279f5325aaSMarcos Paulo de Souza return PTR_ERR(p); 22289f5325aaSMarcos Paulo de Souza 22291da177e4SLinus Torvalds /* 22301da177e4SLinus Torvalds * Do this prior waking up the new thread - the thread pointer 22311da177e4SLinus Torvalds * might get invalid after that point, if the thread exits quickly. 22321da177e4SLinus Torvalds */ 22330a16b607SMathieu Desnoyers trace_sched_process_fork(current, p); 22340a16b607SMathieu Desnoyers 22354e52365fSMatthew Dempsky pid = get_task_pid(p, PIDTYPE_PID); 22364e52365fSMatthew Dempsky nr = pid_vnr(pid); 223730e49c26SPavel Emelyanov 223830e49c26SPavel Emelyanov if (clone_flags & CLONE_PARENT_SETTID) 223930e49c26SPavel Emelyanov put_user(nr, parent_tidptr); 2240a6f5e063SSukadev Bhattiprolu 22411da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) { 22421da177e4SLinus Torvalds p->vfork_done = &vfork; 22431da177e4SLinus Torvalds init_completion(&vfork); 2244d68b46feSOleg Nesterov get_task_struct(p); 22451da177e4SLinus Torvalds } 22461da177e4SLinus Torvalds 22473e51e3edSSamir Bellabes wake_up_new_task(p); 22481da177e4SLinus Torvalds 22494b9d33e6STejun Heo /* forking complete and child started to run, tell ptracer */ 22504b9d33e6STejun Heo if (unlikely(trace)) 22514e52365fSMatthew Dempsky ptrace_event_pid(trace, pid); 225209a05394SRoland McGrath 22531da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) { 2254d68b46feSOleg Nesterov if (!wait_for_vfork_done(p, &vfork)) 22554e52365fSMatthew Dempsky ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); 22569f59ce5dSChuck Ebbert } 22574e52365fSMatthew Dempsky 22584e52365fSMatthew Dempsky put_pid(pid); 225992476d7fSEric W. Biederman return nr; 22601da177e4SLinus Torvalds } 22611da177e4SLinus Torvalds 22623033f14aSJosh Triplett #ifndef CONFIG_HAVE_COPY_THREAD_TLS 22633033f14aSJosh Triplett /* For compatibility with architectures that call do_fork directly rather than 22643033f14aSJosh Triplett * using the syscall entry points below. */ 22653033f14aSJosh Triplett long do_fork(unsigned long clone_flags, 22663033f14aSJosh Triplett unsigned long stack_start, 22673033f14aSJosh Triplett unsigned long stack_size, 22683033f14aSJosh Triplett int __user *parent_tidptr, 22693033f14aSJosh Triplett int __user *child_tidptr) 22703033f14aSJosh Triplett { 22713033f14aSJosh Triplett return _do_fork(clone_flags, stack_start, stack_size, 22723033f14aSJosh Triplett parent_tidptr, child_tidptr, 0); 22733033f14aSJosh Triplett } 22743033f14aSJosh Triplett #endif 22753033f14aSJosh Triplett 22762aa3a7f8SAl Viro /* 22772aa3a7f8SAl Viro * Create a kernel thread. 22782aa3a7f8SAl Viro */ 22792aa3a7f8SAl Viro pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 22802aa3a7f8SAl Viro { 22813033f14aSJosh Triplett return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, 22823033f14aSJosh Triplett (unsigned long)arg, NULL, NULL, 0); 22832aa3a7f8SAl Viro } 22842aa3a7f8SAl Viro 2285d2125043SAl Viro #ifdef __ARCH_WANT_SYS_FORK 2286d2125043SAl Viro SYSCALL_DEFINE0(fork) 2287d2125043SAl Viro { 2288d2125043SAl Viro #ifdef CONFIG_MMU 22893033f14aSJosh Triplett return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0); 2290d2125043SAl Viro #else 2291d2125043SAl Viro /* can not support in nommu mode */ 22925d59e182SDaeseok Youn return -EINVAL; 2293d2125043SAl Viro #endif 2294d2125043SAl Viro } 2295d2125043SAl Viro #endif 2296d2125043SAl Viro 2297d2125043SAl Viro #ifdef __ARCH_WANT_SYS_VFORK 2298d2125043SAl Viro SYSCALL_DEFINE0(vfork) 2299d2125043SAl Viro { 23003033f14aSJosh Triplett return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 23013033f14aSJosh Triplett 0, NULL, NULL, 0); 2302d2125043SAl Viro } 2303d2125043SAl Viro #endif 2304d2125043SAl Viro 2305d2125043SAl Viro #ifdef __ARCH_WANT_SYS_CLONE 2306d2125043SAl Viro #ifdef CONFIG_CLONE_BACKWARDS 2307d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2308d2125043SAl Viro int __user *, parent_tidptr, 23093033f14aSJosh Triplett unsigned long, tls, 2310d2125043SAl Viro int __user *, child_tidptr) 2311d2125043SAl Viro #elif defined(CONFIG_CLONE_BACKWARDS2) 2312d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, 2313d2125043SAl Viro int __user *, parent_tidptr, 2314d2125043SAl Viro int __user *, child_tidptr, 23153033f14aSJosh Triplett unsigned long, tls) 2316dfa9771aSMichal Simek #elif defined(CONFIG_CLONE_BACKWARDS3) 2317dfa9771aSMichal Simek SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, 2318dfa9771aSMichal Simek int, stack_size, 2319dfa9771aSMichal Simek int __user *, parent_tidptr, 2320dfa9771aSMichal Simek int __user *, child_tidptr, 23213033f14aSJosh Triplett unsigned long, tls) 2322d2125043SAl Viro #else 2323d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2324d2125043SAl Viro int __user *, parent_tidptr, 2325d2125043SAl Viro int __user *, child_tidptr, 23263033f14aSJosh Triplett unsigned long, tls) 2327d2125043SAl Viro #endif 2328d2125043SAl Viro { 23293033f14aSJosh Triplett return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls); 2330d2125043SAl Viro } 2331d2125043SAl Viro #endif 2332d2125043SAl Viro 23330f1b92cbSOleg Nesterov void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) 23340f1b92cbSOleg Nesterov { 23350f1b92cbSOleg Nesterov struct task_struct *leader, *parent, *child; 23360f1b92cbSOleg Nesterov int res; 23370f1b92cbSOleg Nesterov 23380f1b92cbSOleg Nesterov read_lock(&tasklist_lock); 23390f1b92cbSOleg Nesterov leader = top = top->group_leader; 23400f1b92cbSOleg Nesterov down: 23410f1b92cbSOleg Nesterov for_each_thread(leader, parent) { 23420f1b92cbSOleg Nesterov list_for_each_entry(child, &parent->children, sibling) { 23430f1b92cbSOleg Nesterov res = visitor(child, data); 23440f1b92cbSOleg Nesterov if (res) { 23450f1b92cbSOleg Nesterov if (res < 0) 23460f1b92cbSOleg Nesterov goto out; 23470f1b92cbSOleg Nesterov leader = child; 23480f1b92cbSOleg Nesterov goto down; 23490f1b92cbSOleg Nesterov } 23500f1b92cbSOleg Nesterov up: 23510f1b92cbSOleg Nesterov ; 23520f1b92cbSOleg Nesterov } 23530f1b92cbSOleg Nesterov } 23540f1b92cbSOleg Nesterov 23550f1b92cbSOleg Nesterov if (leader != top) { 23560f1b92cbSOleg Nesterov child = leader; 23570f1b92cbSOleg Nesterov parent = child->real_parent; 23580f1b92cbSOleg Nesterov leader = parent->group_leader; 23590f1b92cbSOleg Nesterov goto up; 23600f1b92cbSOleg Nesterov } 23610f1b92cbSOleg Nesterov out: 23620f1b92cbSOleg Nesterov read_unlock(&tasklist_lock); 23630f1b92cbSOleg Nesterov } 23640f1b92cbSOleg Nesterov 23655fd63b30SRavikiran G Thirumalai #ifndef ARCH_MIN_MMSTRUCT_ALIGN 23665fd63b30SRavikiran G Thirumalai #define ARCH_MIN_MMSTRUCT_ALIGN 0 23675fd63b30SRavikiran G Thirumalai #endif 23685fd63b30SRavikiran G Thirumalai 236951cc5068SAlexey Dobriyan static void sighand_ctor(void *data) 2370aa1757f9SOleg Nesterov { 2371aa1757f9SOleg Nesterov struct sighand_struct *sighand = data; 2372aa1757f9SOleg Nesterov 2373aa1757f9SOleg Nesterov spin_lock_init(&sighand->siglock); 2374b8fceee1SDavide Libenzi init_waitqueue_head(&sighand->signalfd_wqh); 2375fba2afaaSDavide Libenzi } 2376aa1757f9SOleg Nesterov 23771da177e4SLinus Torvalds void __init proc_caches_init(void) 23781da177e4SLinus Torvalds { 2379c1a2f7f0SRik van Riel unsigned int mm_size; 2380c1a2f7f0SRik van Riel 23811da177e4SLinus Torvalds sighand_cachep = kmem_cache_create("sighand_cache", 23821da177e4SLinus Torvalds sizeof(struct sighand_struct), 0, 23835f0d5a3aSPaul E. McKenney SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| 238475f296d9SLevin, Alexander (Sasha Levin) SLAB_ACCOUNT, sighand_ctor); 23851da177e4SLinus Torvalds signal_cachep = kmem_cache_create("signal_cache", 23861da177e4SLinus Torvalds sizeof(struct signal_struct), 0, 238775f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 23885d097056SVladimir Davydov NULL); 23891da177e4SLinus Torvalds files_cachep = kmem_cache_create("files_cache", 23901da177e4SLinus Torvalds sizeof(struct files_struct), 0, 239175f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 23925d097056SVladimir Davydov NULL); 23931da177e4SLinus Torvalds fs_cachep = kmem_cache_create("fs_cache", 23941da177e4SLinus Torvalds sizeof(struct fs_struct), 0, 239575f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 23965d097056SVladimir Davydov NULL); 2397c1a2f7f0SRik van Riel 23986345d24dSLinus Torvalds /* 2399c1a2f7f0SRik van Riel * The mm_cpumask is located at the end of mm_struct, and is 2400c1a2f7f0SRik van Riel * dynamically sized based on the maximum CPU number this system 2401c1a2f7f0SRik van Riel * can have, taking hotplug into account (nr_cpu_ids). 24026345d24dSLinus Torvalds */ 2403c1a2f7f0SRik van Riel mm_size = sizeof(struct mm_struct) + cpumask_size(); 2404c1a2f7f0SRik van Riel 240507dcd7feSDavid Windsor mm_cachep = kmem_cache_create_usercopy("mm_struct", 2406c1a2f7f0SRik van Riel mm_size, ARCH_MIN_MMSTRUCT_ALIGN, 240775f296d9SLevin, Alexander (Sasha Levin) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 240807dcd7feSDavid Windsor offsetof(struct mm_struct, saved_auxv), 240907dcd7feSDavid Windsor sizeof_field(struct mm_struct, saved_auxv), 24105d097056SVladimir Davydov NULL); 24115d097056SVladimir Davydov vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); 24128feae131SDavid Howells mmap_init(); 241366577193SAl Viro nsproxy_cache_init(); 24141da177e4SLinus Torvalds } 2415cf2e340fSJANAK DESAI 2416cf2e340fSJANAK DESAI /* 24179bfb23fcSOleg Nesterov * Check constraints on flags passed to the unshare system call. 2418cf2e340fSJANAK DESAI */ 24199bfb23fcSOleg Nesterov static int check_unshare_flags(unsigned long unshare_flags) 2420cf2e340fSJANAK DESAI { 24219bfb23fcSOleg Nesterov if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| 24229bfb23fcSOleg Nesterov CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| 242350804fe3SEric W. Biederman CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| 2424a79a908fSAditya Kali CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP)) 2425cf2e340fSJANAK DESAI return -EINVAL; 24269bfb23fcSOleg Nesterov /* 242712c641abSEric W. Biederman * Not implemented, but pretend it works if there is nothing 242812c641abSEric W. Biederman * to unshare. Note that unsharing the address space or the 242912c641abSEric W. Biederman * signal handlers also need to unshare the signal queues (aka 243012c641abSEric W. Biederman * CLONE_THREAD). 24319bfb23fcSOleg Nesterov */ 24329bfb23fcSOleg Nesterov if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { 243312c641abSEric W. Biederman if (!thread_group_empty(current)) 243412c641abSEric W. Biederman return -EINVAL; 243512c641abSEric W. Biederman } 243612c641abSEric W. Biederman if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { 243712c641abSEric W. Biederman if (atomic_read(¤t->sighand->count) > 1) 243812c641abSEric W. Biederman return -EINVAL; 243912c641abSEric W. Biederman } 244012c641abSEric W. Biederman if (unshare_flags & CLONE_VM) { 244112c641abSEric W. Biederman if (!current_is_single_threaded()) 24429bfb23fcSOleg Nesterov return -EINVAL; 24439bfb23fcSOleg Nesterov } 2444cf2e340fSJANAK DESAI 2445cf2e340fSJANAK DESAI return 0; 2446cf2e340fSJANAK DESAI } 2447cf2e340fSJANAK DESAI 2448cf2e340fSJANAK DESAI /* 244999d1419dSJANAK DESAI * Unshare the filesystem structure if it is being shared 2450cf2e340fSJANAK DESAI */ 2451cf2e340fSJANAK DESAI static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) 2452cf2e340fSJANAK DESAI { 2453cf2e340fSJANAK DESAI struct fs_struct *fs = current->fs; 2454cf2e340fSJANAK DESAI 2455498052bbSAl Viro if (!(unshare_flags & CLONE_FS) || !fs) 2456498052bbSAl Viro return 0; 2457498052bbSAl Viro 2458498052bbSAl Viro /* don't need lock here; in the worst case we'll do useless copy */ 2459498052bbSAl Viro if (fs->users == 1) 2460498052bbSAl Viro return 0; 2461498052bbSAl Viro 2462498052bbSAl Viro *new_fsp = copy_fs_struct(fs); 246399d1419dSJANAK DESAI if (!*new_fsp) 246499d1419dSJANAK DESAI return -ENOMEM; 2465cf2e340fSJANAK DESAI 2466cf2e340fSJANAK DESAI return 0; 2467cf2e340fSJANAK DESAI } 2468cf2e340fSJANAK DESAI 2469cf2e340fSJANAK DESAI /* 2470a016f338SJANAK DESAI * Unshare file descriptor table if it is being shared 2471cf2e340fSJANAK DESAI */ 2472cf2e340fSJANAK DESAI static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) 2473cf2e340fSJANAK DESAI { 2474cf2e340fSJANAK DESAI struct files_struct *fd = current->files; 2475a016f338SJANAK DESAI int error = 0; 2476cf2e340fSJANAK DESAI 2477cf2e340fSJANAK DESAI if ((unshare_flags & CLONE_FILES) && 2478a016f338SJANAK DESAI (fd && atomic_read(&fd->count) > 1)) { 2479a016f338SJANAK DESAI *new_fdp = dup_fd(fd, &error); 2480a016f338SJANAK DESAI if (!*new_fdp) 2481a016f338SJANAK DESAI return error; 2482a016f338SJANAK DESAI } 2483cf2e340fSJANAK DESAI 2484cf2e340fSJANAK DESAI return 0; 2485cf2e340fSJANAK DESAI } 2486cf2e340fSJANAK DESAI 2487cf2e340fSJANAK DESAI /* 2488cf2e340fSJANAK DESAI * unshare allows a process to 'unshare' part of the process 2489cf2e340fSJANAK DESAI * context which was originally shared using clone. copy_* 2490cf2e340fSJANAK DESAI * functions used by do_fork() cannot be used here directly 2491cf2e340fSJANAK DESAI * because they modify an inactive task_struct that is being 2492cf2e340fSJANAK DESAI * constructed. Here we are modifying the current, active, 2493cf2e340fSJANAK DESAI * task_struct. 2494cf2e340fSJANAK DESAI */ 24959b32105eSDominik Brodowski int ksys_unshare(unsigned long unshare_flags) 2496cf2e340fSJANAK DESAI { 2497cf2e340fSJANAK DESAI struct fs_struct *fs, *new_fs = NULL; 2498cf2e340fSJANAK DESAI struct files_struct *fd, *new_fd = NULL; 2499b2e0d987SEric W. Biederman struct cred *new_cred = NULL; 2500cf7b708cSPavel Emelyanov struct nsproxy *new_nsproxy = NULL; 25019edff4abSManfred Spraul int do_sysvsem = 0; 25029bfb23fcSOleg Nesterov int err; 2503cf2e340fSJANAK DESAI 250450804fe3SEric W. Biederman /* 2505faf00da5SEric W. Biederman * If unsharing a user namespace must also unshare the thread group 2506faf00da5SEric W. Biederman * and unshare the filesystem root and working directories. 2507b2e0d987SEric W. Biederman */ 2508b2e0d987SEric W. Biederman if (unshare_flags & CLONE_NEWUSER) 2509e66eded8SEric W. Biederman unshare_flags |= CLONE_THREAD | CLONE_FS; 2510b2e0d987SEric W. Biederman /* 251150804fe3SEric W. Biederman * If unsharing vm, must also unshare signal handlers. 251250804fe3SEric W. Biederman */ 251350804fe3SEric W. Biederman if (unshare_flags & CLONE_VM) 251450804fe3SEric W. Biederman unshare_flags |= CLONE_SIGHAND; 25156013f67fSManfred Spraul /* 251612c641abSEric W. Biederman * If unsharing a signal handlers, must also unshare the signal queues. 251712c641abSEric W. Biederman */ 251812c641abSEric W. Biederman if (unshare_flags & CLONE_SIGHAND) 251912c641abSEric W. Biederman unshare_flags |= CLONE_THREAD; 252012c641abSEric W. Biederman /* 25219bfb23fcSOleg Nesterov * If unsharing namespace, must also unshare filesystem information. 25229bfb23fcSOleg Nesterov */ 25239bfb23fcSOleg Nesterov if (unshare_flags & CLONE_NEWNS) 25249bfb23fcSOleg Nesterov unshare_flags |= CLONE_FS; 252550804fe3SEric W. Biederman 252650804fe3SEric W. Biederman err = check_unshare_flags(unshare_flags); 252750804fe3SEric W. Biederman if (err) 252850804fe3SEric W. Biederman goto bad_unshare_out; 25299bfb23fcSOleg Nesterov /* 25306013f67fSManfred Spraul * CLONE_NEWIPC must also detach from the undolist: after switching 25316013f67fSManfred Spraul * to a new ipc namespace, the semaphore arrays from the old 25326013f67fSManfred Spraul * namespace are unreachable. 25336013f67fSManfred Spraul */ 25346013f67fSManfred Spraul if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) 25359edff4abSManfred Spraul do_sysvsem = 1; 2536fb0a685cSDaniel Rebelo de Oliveira err = unshare_fs(unshare_flags, &new_fs); 2537fb0a685cSDaniel Rebelo de Oliveira if (err) 25389bfb23fcSOleg Nesterov goto bad_unshare_out; 2539fb0a685cSDaniel Rebelo de Oliveira err = unshare_fd(unshare_flags, &new_fd); 2540fb0a685cSDaniel Rebelo de Oliveira if (err) 25419bfb23fcSOleg Nesterov goto bad_unshare_cleanup_fs; 2542b2e0d987SEric W. Biederman err = unshare_userns(unshare_flags, &new_cred); 2543fb0a685cSDaniel Rebelo de Oliveira if (err) 25449edff4abSManfred Spraul goto bad_unshare_cleanup_fd; 2545b2e0d987SEric W. Biederman err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, 2546b2e0d987SEric W. Biederman new_cred, new_fs); 2547b2e0d987SEric W. Biederman if (err) 2548b2e0d987SEric W. Biederman goto bad_unshare_cleanup_cred; 2549cf2e340fSJANAK DESAI 2550b2e0d987SEric W. Biederman if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { 25519edff4abSManfred Spraul if (do_sysvsem) { 25529edff4abSManfred Spraul /* 25539edff4abSManfred Spraul * CLONE_SYSVSEM is equivalent to sys_exit(). 25549edff4abSManfred Spraul */ 25559edff4abSManfred Spraul exit_sem(current); 25569edff4abSManfred Spraul } 2557ab602f79SJack Miller if (unshare_flags & CLONE_NEWIPC) { 2558ab602f79SJack Miller /* Orphan segments in old ns (see sem above). */ 2559ab602f79SJack Miller exit_shm(current); 2560ab602f79SJack Miller shm_init_task(current); 2561ab602f79SJack Miller } 2562ab516013SSerge E. Hallyn 25636f977e6bSAlan Cox if (new_nsproxy) 2564cf7b708cSPavel Emelyanov switch_task_namespaces(current, new_nsproxy); 2565cf2e340fSJANAK DESAI 2566cf7b708cSPavel Emelyanov task_lock(current); 2567cf7b708cSPavel Emelyanov 2568cf2e340fSJANAK DESAI if (new_fs) { 2569cf2e340fSJANAK DESAI fs = current->fs; 25702a4419b5SNick Piggin spin_lock(&fs->lock); 2571cf2e340fSJANAK DESAI current->fs = new_fs; 2572498052bbSAl Viro if (--fs->users) 2573498052bbSAl Viro new_fs = NULL; 2574498052bbSAl Viro else 2575cf2e340fSJANAK DESAI new_fs = fs; 25762a4419b5SNick Piggin spin_unlock(&fs->lock); 2577cf2e340fSJANAK DESAI } 2578cf2e340fSJANAK DESAI 2579cf2e340fSJANAK DESAI if (new_fd) { 2580cf2e340fSJANAK DESAI fd = current->files; 2581cf2e340fSJANAK DESAI current->files = new_fd; 2582cf2e340fSJANAK DESAI new_fd = fd; 2583cf2e340fSJANAK DESAI } 2584cf2e340fSJANAK DESAI 2585cf2e340fSJANAK DESAI task_unlock(current); 2586b2e0d987SEric W. Biederman 2587b2e0d987SEric W. Biederman if (new_cred) { 2588b2e0d987SEric W. Biederman /* Install the new user namespace */ 2589b2e0d987SEric W. Biederman commit_creds(new_cred); 2590b2e0d987SEric W. Biederman new_cred = NULL; 2591b2e0d987SEric W. Biederman } 2592cf2e340fSJANAK DESAI } 2593cf2e340fSJANAK DESAI 2594e4222673SHari Bathini perf_event_namespaces(current); 2595e4222673SHari Bathini 2596b2e0d987SEric W. Biederman bad_unshare_cleanup_cred: 2597b2e0d987SEric W. Biederman if (new_cred) 2598b2e0d987SEric W. Biederman put_cred(new_cred); 2599cf2e340fSJANAK DESAI bad_unshare_cleanup_fd: 2600cf2e340fSJANAK DESAI if (new_fd) 2601cf2e340fSJANAK DESAI put_files_struct(new_fd); 2602cf2e340fSJANAK DESAI 2603cf2e340fSJANAK DESAI bad_unshare_cleanup_fs: 2604cf2e340fSJANAK DESAI if (new_fs) 2605498052bbSAl Viro free_fs_struct(new_fs); 2606cf2e340fSJANAK DESAI 2607cf2e340fSJANAK DESAI bad_unshare_out: 2608cf2e340fSJANAK DESAI return err; 2609cf2e340fSJANAK DESAI } 26103b125388SAl Viro 26119b32105eSDominik Brodowski SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) 26129b32105eSDominik Brodowski { 26139b32105eSDominik Brodowski return ksys_unshare(unshare_flags); 26149b32105eSDominik Brodowski } 26159b32105eSDominik Brodowski 26163b125388SAl Viro /* 26173b125388SAl Viro * Helper to unshare the files of the current task. 26183b125388SAl Viro * We don't want to expose copy_files internals to 26193b125388SAl Viro * the exec layer of the kernel. 26203b125388SAl Viro */ 26213b125388SAl Viro 26223b125388SAl Viro int unshare_files(struct files_struct **displaced) 26233b125388SAl Viro { 26243b125388SAl Viro struct task_struct *task = current; 262550704516SAl Viro struct files_struct *copy = NULL; 26263b125388SAl Viro int error; 26273b125388SAl Viro 26283b125388SAl Viro error = unshare_fd(CLONE_FILES, ©); 26293b125388SAl Viro if (error || !copy) { 26303b125388SAl Viro *displaced = NULL; 26313b125388SAl Viro return error; 26323b125388SAl Viro } 26333b125388SAl Viro *displaced = task->files; 26343b125388SAl Viro task_lock(task); 26353b125388SAl Viro task->files = copy; 26363b125388SAl Viro task_unlock(task); 26373b125388SAl Viro return 0; 26383b125388SAl Viro } 263916db3d3fSHeinrich Schuchardt 264016db3d3fSHeinrich Schuchardt int sysctl_max_threads(struct ctl_table *table, int write, 264116db3d3fSHeinrich Schuchardt void __user *buffer, size_t *lenp, loff_t *ppos) 264216db3d3fSHeinrich Schuchardt { 264316db3d3fSHeinrich Schuchardt struct ctl_table t; 264416db3d3fSHeinrich Schuchardt int ret; 264516db3d3fSHeinrich Schuchardt int threads = max_threads; 264616db3d3fSHeinrich Schuchardt int min = MIN_THREADS; 264716db3d3fSHeinrich Schuchardt int max = MAX_THREADS; 264816db3d3fSHeinrich Schuchardt 264916db3d3fSHeinrich Schuchardt t = *table; 265016db3d3fSHeinrich Schuchardt t.data = &threads; 265116db3d3fSHeinrich Schuchardt t.extra1 = &min; 265216db3d3fSHeinrich Schuchardt t.extra2 = &max; 265316db3d3fSHeinrich Schuchardt 265416db3d3fSHeinrich Schuchardt ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 265516db3d3fSHeinrich Schuchardt if (ret || !write) 265616db3d3fSHeinrich Schuchardt return ret; 265716db3d3fSHeinrich Schuchardt 265816db3d3fSHeinrich Schuchardt set_max_threads(threads); 265916db3d3fSHeinrich Schuchardt 266016db3d3fSHeinrich Schuchardt return 0; 266116db3d3fSHeinrich Schuchardt } 2662