/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

void __weak arch_release_thread_stack(unsigned long *stack)
{
}

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
#endif

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
	void *stack;
	int i;

	local_irq_disable();
	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s = this_cpu_read(cached_stacks[i]);

		if (!s)
			continue;
		this_cpu_write(cached_stacks[i], NULL);

		tsk->stack_vm_area = s;
		local_irq_enable();
		return s->addr;
	}
	local_irq_enable();

	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP | __GFP_HIGHMEM,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));

	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	if (stack)
		tsk->stack_vm_area = find_vm_area(stack);
	return stack;
#else
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
#endif
}
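/*
 * A note on the cache above: free_thread_stack() (below) may be called
 * in interrupt context, so the per-CPU cached_stacks[] array is only
 * manipulated with interrupts disabled.  A stack recycled from the
 * cache keeps its vm_struct (tsk->stack_vm_area = s), so the vmalloc
 * area is reused without a fresh allocation or another TLB flush.
 */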
static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	if (task_stack_vm_area(tsk)) {
		unsigned long flags;
		int i;

		local_irq_save(flags);
		for (i = 0; i < NR_CACHED_STACKS; i++) {
			if (this_cpu_read(cached_stacks[i]))
				continue;

			this_cpu_write(cached_stacks[i], tsk->stack_vm_area);
			local_irq_restore(flags);
			return;
		}
		local_irq_restore(flags);

		vfree_atomic(tsk->stack);
		return;
	}
#endif

	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
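/*
 * The caches above are only declared here; they are created during
 * boot by proc_caches_init() (further down this file, outside this
 * excerpt), which uses SLAB_PANIC so a failure to create any of them
 * is fatal.
 */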
static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	if (vm) {
		int i;

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
			mod_zone_page_state(page_zone(vm->pages[i]),
					    NR_KERNEL_STACK_KB,
					    PAGE_SIZE / 1024 * account);
		}

		/* All stack pages belong to the same memcg. */
		memcg_kmem_update_page_stat(vm->pages[0], MEMCG_KERNEL_STACK_KB,
					    account * (THREAD_SIZE / 1024));
	} else {
		/*
		 * All stack pages are in the same zone and belong to the
		 * same memcg.
		 */
		struct page *first_page = virt_to_page(stack);

		mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
				    THREAD_SIZE / 1024 * account);

		memcg_kmem_update_page_stat(first_page, MEMCG_KERNEL_STACK_KB,
					    account * (THREAD_SIZE / 1024));
	}
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(tsk->state != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);
	arch_release_thread_stack(tsk->stack);
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif
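/*
 * With CONFIG_THREAD_INFO_IN_TASK the stack carries its own reference
 * count, so it can be freed by the last put_task_stack() before the
 * task_struct itself dies; without it, stack and thread_info are torn
 * down together in free_task() below.
 */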
void free_task(struct task_struct *tsk)
{
#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	put_seccomp_filter(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	cgroup_free(tsk);
	task_numa_free(tsk);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }
/*
 * set_max_threads
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);

	/* create a slab on which task_structs can be allocated */
	task_struct_cachep = kmem_cache_create("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++) {
		init_user_ns.ucount_max[i] = max_threads/2;
	}
}
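/*
 * Worked example for set_max_threads() above: the divisor of 8 means
 * thread stacks may consume at most 1/8th of memory.  Assuming (for
 * illustration only) 4 GiB of RAM, 4 KiB pages and 16 KiB stacks:
 *
 *	threads = (2^20 pages * 2^12 bytes) / (2^14 * 8) = 2^15 = 32768
 *
 * clamped to [MIN_THREADS, MAX_THREADS] = [20, FUTEX_TID_MASK].
 */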
int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}
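/*
 * STACK_END_MAGIC sits at the lowest usable address of the (downward
 * growing) stack, so an overflow corrupts it first.  Consumers such as
 * the scheduler check it via task_stack_end_corrupted() and panic if
 * it has been overwritten.
 */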
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	unsigned long *stack;
	struct vm_struct *stack_vm_area;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	stack = alloc_thread_stack_node(tsk, node);
	if (!stack)
		goto free_tsk;

	stack_vm_area = task_stack_vm_area(tsk);

	err = arch_dup_task_struct(tsk, orig);

	/*
	 * arch_dup_task_struct() clobbers the stack-related fields.  Make
	 * sure they're properly initialized before using any stack-related
	 * functions again.
	 */
	tsk->stack = stack;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	atomic_set(&tsk->stack_refcount, 1);
#endif

	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;

	account_kernel_stack(tsk, 1);

	kcov_task_init(tsk);

	return tsk;

free_stack:
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	LIST_HEAD(uf);

	uprobe_start_dup_mmap();
	if (down_write_killable(&oldmm->mmap_sem)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
	/* No ordering required: file already has been exposed. */
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				atomic_inc(&mapping->i_mmap_writable);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}
		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	down_write(&oldmm->mmap_sem);
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
	up_write(&oldmm->mmap_sem);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
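/*
 * On !CONFIG_MMU kernels the dup_mmap() stub above only carries the
 * exe_file reference across: there are no page tables to copy, and
 * plain fork() is not supported on nommu anyway (only vfork() and
 * clone(CLONE_VM), which share the mm instead of duplicating it).
 */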
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	atomic_long_set(&mm->nr_ptes, 0);
	mm_nr_pmds_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	mm->pinned_vm = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	mmu_notifier_mm_init(mm);
	clear_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	mm->user_ns = get_user_ns(user_ns);
	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

	if (atomic_long_read(&mm->nr_ptes))
		pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
				atomic_long_read(&mm->nr_ptes));
	if (mm_nr_pmds(mm))
		pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
				mm_nr_pmds(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}
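/*
 * check_mm() runs from __mmdrop() below, once nothing can touch the
 * address space any more: a non-zero RSS counter, nr_ptes or nr_pmds
 * value at that point indicates an accounting leak, hence the "BUG:"
 * reports.
 */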
/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_page(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	set_bit(MMF_OOM_SKIP, &mm->flags);
	mmdrop(mm);
}
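/*
 * Two counters with different lifetimes guard an mm: mm_users counts
 * users of the address space (taken with mmget(), dropped with
 * mmput() -- the final mmput() calls __mmput() above to tear the
 * mappings down), while mm_count pins the mm_struct itself (e.g. for
 * lazy-TLB kernel threads) and is dropped with mmdrop()/__mmdrop().
 */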
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
#endif

/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve task is single
 * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
 * mm->exe_file, but does so without using set_mm_exe_file() in order
 * to avoid the need for any locks.
 */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file)
		get_file(new_exe_file);
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file)
		fput(old_exe_file);
}
/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}
EXPORT_SYMBOL(get_mm_exe_file);

/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}
EXPORT_SYMBOL(get_task_exe_file);
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			mmget(mm);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}

static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}
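/*
 * The vfork handshake: the parent blocks in wait_for_vfork_done()
 * (freezer-aware and killable) until the child calls
 * complete_vfork_done() from mm_release() below, i.e. at exec or exit.
 * If the parent is killed first, it clears child->vfork_done under
 * task_lock so the child will not complete a stale completion.
 */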
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}
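/*
 * The clear_child_tid futex wake above is what makes pthread_join()
 * work: userspace (e.g. glibc) registers the TID address via
 * CLONE_CHILD_CLEARTID or set_tid_address(2), and the joiner sleeps in
 * FUTEX_WAIT on it until this zeroing and wake at thread exit.
 */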
1128a0a7ec30SJANAK DESAI */ 1129ff252c1fSDaeSeok Youn static struct mm_struct *dup_mm(struct task_struct *tsk) 1130a0a7ec30SJANAK DESAI { 1131a0a7ec30SJANAK DESAI struct mm_struct *mm, *oldmm = current->mm; 1132a0a7ec30SJANAK DESAI int err; 1133a0a7ec30SJANAK DESAI 1134a0a7ec30SJANAK DESAI mm = allocate_mm(); 1135a0a7ec30SJANAK DESAI if (!mm) 1136a0a7ec30SJANAK DESAI goto fail_nomem; 1137a0a7ec30SJANAK DESAI 1138a0a7ec30SJANAK DESAI memcpy(mm, oldmm, sizeof(*mm)); 1139a0a7ec30SJANAK DESAI 1140bfedb589SEric W. Biederman if (!mm_init(mm, tsk, mm->user_ns)) 1141a0a7ec30SJANAK DESAI goto fail_nomem; 1142a0a7ec30SJANAK DESAI 1143a0a7ec30SJANAK DESAI err = dup_mmap(mm, oldmm); 1144a0a7ec30SJANAK DESAI if (err) 1145a0a7ec30SJANAK DESAI goto free_pt; 1146a0a7ec30SJANAK DESAI 1147a0a7ec30SJANAK DESAI mm->hiwater_rss = get_mm_rss(mm); 1148a0a7ec30SJANAK DESAI mm->hiwater_vm = mm->total_vm; 1149a0a7ec30SJANAK DESAI 1150801460d0SHiroshi Shimamoto if (mm->binfmt && !try_module_get(mm->binfmt->module)) 1151801460d0SHiroshi Shimamoto goto free_pt; 1152801460d0SHiroshi Shimamoto 1153a0a7ec30SJANAK DESAI return mm; 1154a0a7ec30SJANAK DESAI 1155a0a7ec30SJANAK DESAI free_pt: 1156801460d0SHiroshi Shimamoto /* don't put binfmt in mmput, we haven't got the module yet */ 1157801460d0SHiroshi Shimamoto mm->binfmt = NULL; 1158a0a7ec30SJANAK DESAI mmput(mm); 1159a0a7ec30SJANAK DESAI 1160a0a7ec30SJANAK DESAI fail_nomem: 1161a0a7ec30SJANAK DESAI return NULL; 1162a0a7ec30SJANAK DESAI } 1163a0a7ec30SJANAK DESAI 11641da177e4SLinus Torvalds static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) 11651da177e4SLinus Torvalds { 11661da177e4SLinus Torvalds struct mm_struct *mm, *oldmm; 11671da177e4SLinus Torvalds int retval; 11681da177e4SLinus Torvalds 11691da177e4SLinus Torvalds tsk->min_flt = tsk->maj_flt = 0; 11701da177e4SLinus Torvalds tsk->nvcsw = tsk->nivcsw = 0; 117117406b82SMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK 117217406b82SMandeep Singh Baines tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; 117317406b82SMandeep Singh Baines #endif 11741da177e4SLinus Torvalds 11751da177e4SLinus Torvalds tsk->mm = NULL; 11761da177e4SLinus Torvalds tsk->active_mm = NULL; 11771da177e4SLinus Torvalds 11781da177e4SLinus Torvalds /* 11791da177e4SLinus Torvalds * Are we cloning a kernel thread? 11801da177e4SLinus Torvalds * 11811da177e4SLinus Torvalds * We need to steal an active VM for that.
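 *
 * (Rough background note: a kernel thread keeps tsk->mm == NULL and
 * only borrows whatever user mm happens to be live, via
 * tsk->active_mm, which is why the code below simply returns once
 * current->mm turns out to be NULL.)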
11821da177e4SLinus Torvalds */ 11831da177e4SLinus Torvalds oldmm = current->mm; 11841da177e4SLinus Torvalds if (!oldmm) 11851da177e4SLinus Torvalds return 0; 11861da177e4SLinus Torvalds 1187615d6e87SDavidlohr Bueso /* initialize the new vmacache entries */ 1188615d6e87SDavidlohr Bueso vmacache_flush(tsk); 1189615d6e87SDavidlohr Bueso 11901da177e4SLinus Torvalds if (clone_flags & CLONE_VM) { 1191*3fce371bSVegard Nossum mmget(oldmm); 11921da177e4SLinus Torvalds mm = oldmm; 11931da177e4SLinus Torvalds goto good_mm; 11941da177e4SLinus Torvalds } 11951da177e4SLinus Torvalds 11961da177e4SLinus Torvalds retval = -ENOMEM; 1197a0a7ec30SJANAK DESAI mm = dup_mm(tsk); 11981da177e4SLinus Torvalds if (!mm) 11991da177e4SLinus Torvalds goto fail_nomem; 12001da177e4SLinus Torvalds 12011da177e4SLinus Torvalds good_mm: 12021da177e4SLinus Torvalds tsk->mm = mm; 12031da177e4SLinus Torvalds tsk->active_mm = mm; 12041da177e4SLinus Torvalds return 0; 12051da177e4SLinus Torvalds 12061da177e4SLinus Torvalds fail_nomem: 12071da177e4SLinus Torvalds return retval; 12081da177e4SLinus Torvalds } 12091da177e4SLinus Torvalds 1210a39bc516SAlexey Dobriyan static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) 12111da177e4SLinus Torvalds { 1212498052bbSAl Viro struct fs_struct *fs = current->fs; 12131da177e4SLinus Torvalds if (clone_flags & CLONE_FS) { 1214498052bbSAl Viro /* tsk->fs is already what we want */ 12152a4419b5SNick Piggin spin_lock(&fs->lock); 1216498052bbSAl Viro if (fs->in_exec) { 12172a4419b5SNick Piggin spin_unlock(&fs->lock); 1218498052bbSAl Viro return -EAGAIN; 1219498052bbSAl Viro } 1220498052bbSAl Viro fs->users++; 12212a4419b5SNick Piggin spin_unlock(&fs->lock); 12221da177e4SLinus Torvalds return 0; 12231da177e4SLinus Torvalds } 1224498052bbSAl Viro tsk->fs = copy_fs_struct(fs); 12251da177e4SLinus Torvalds if (!tsk->fs) 12261da177e4SLinus Torvalds return -ENOMEM; 12271da177e4SLinus Torvalds return 0; 12281da177e4SLinus Torvalds } 12291da177e4SLinus Torvalds 1230a016f338SJANAK DESAI static int copy_files(unsigned long clone_flags, struct task_struct *tsk) 1231a016f338SJANAK DESAI { 1232a016f338SJANAK DESAI struct files_struct *oldf, *newf; 1233a016f338SJANAK DESAI int error = 0; 1234a016f338SJANAK DESAI 1235a016f338SJANAK DESAI /* 1236a016f338SJANAK DESAI * A background process may not have any files ... 
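 *
 * (Illustrative semantics: with CLONE_FILES parent and child share a
 * single descriptor table, so an fd opened by either task is at once
 * visible to both; without it, dup_fd() below hands the child its own
 * private copy of the current table.)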
1237a016f338SJANAK DESAI */ 1238a016f338SJANAK DESAI oldf = current->files; 1239a016f338SJANAK DESAI if (!oldf) 1240a016f338SJANAK DESAI goto out; 1241a016f338SJANAK DESAI 1242a016f338SJANAK DESAI if (clone_flags & CLONE_FILES) { 1243a016f338SJANAK DESAI atomic_inc(&oldf->count); 1244a016f338SJANAK DESAI goto out; 1245a016f338SJANAK DESAI } 1246a016f338SJANAK DESAI 1247a016f338SJANAK DESAI newf = dup_fd(oldf, &error); 1248a016f338SJANAK DESAI if (!newf) 1249a016f338SJANAK DESAI goto out; 1250a016f338SJANAK DESAI 1251a016f338SJANAK DESAI tsk->files = newf; 1252a016f338SJANAK DESAI error = 0; 1253a016f338SJANAK DESAI out: 1254a016f338SJANAK DESAI return error; 1255a016f338SJANAK DESAI } 1256a016f338SJANAK DESAI 1257fadad878SJens Axboe static int copy_io(unsigned long clone_flags, struct task_struct *tsk) 1258fd0928dfSJens Axboe { 1259fd0928dfSJens Axboe #ifdef CONFIG_BLOCK 1260fd0928dfSJens Axboe struct io_context *ioc = current->io_context; 12616e736be7STejun Heo struct io_context *new_ioc; 1262fd0928dfSJens Axboe 1263fd0928dfSJens Axboe if (!ioc) 1264fd0928dfSJens Axboe return 0; 1265fadad878SJens Axboe /* 1266fadad878SJens Axboe * Share io context with parent, if CLONE_IO is set 1267fadad878SJens Axboe */ 1268fadad878SJens Axboe if (clone_flags & CLONE_IO) { 12693d48749dSTejun Heo ioc_task_link(ioc); 12703d48749dSTejun Heo tsk->io_context = ioc; 1271fadad878SJens Axboe } else if (ioprio_valid(ioc->ioprio)) { 12726e736be7STejun Heo new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE); 12736e736be7STejun Heo if (unlikely(!new_ioc)) 1274fd0928dfSJens Axboe return -ENOMEM; 1275fd0928dfSJens Axboe 12766e736be7STejun Heo new_ioc->ioprio = ioc->ioprio; 127711a3122fSTejun Heo put_io_context(new_ioc); 1278fd0928dfSJens Axboe } 1279fd0928dfSJens Axboe #endif 1280fd0928dfSJens Axboe return 0; 1281fd0928dfSJens Axboe } 1282fd0928dfSJens Axboe 1283a39bc516SAlexey Dobriyan static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) 12841da177e4SLinus Torvalds { 12851da177e4SLinus Torvalds struct sighand_struct *sig; 12861da177e4SLinus Torvalds 128760348802SZhaolei if (clone_flags & CLONE_SIGHAND) { 12881da177e4SLinus Torvalds atomic_inc(&current->sighand->count); 12891da177e4SLinus Torvalds return 0; 12901da177e4SLinus Torvalds } 12911da177e4SLinus Torvalds sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); 1292e56d0903SIngo Molnar rcu_assign_pointer(tsk->sighand, sig); 12931da177e4SLinus Torvalds if (!sig) 12941da177e4SLinus Torvalds return -ENOMEM; 12959d7fb042SPeter Zijlstra 12961da177e4SLinus Torvalds atomic_set(&sig->count, 1); 12971da177e4SLinus Torvalds memcpy(sig->action, current->sighand->action, sizeof(sig->action)); 12981da177e4SLinus Torvalds return 0; 12991da177e4SLinus Torvalds } 13001da177e4SLinus Torvalds 1301a7e5328aSOleg Nesterov void __cleanup_sighand(struct sighand_struct *sighand) 1302c81addc9SOleg Nesterov { 1303d80e731eSOleg Nesterov if (atomic_dec_and_test(&sighand->count)) { 1304d80e731eSOleg Nesterov signalfd_cleanup(sighand); 1305392809b2SOleg Nesterov /* 1306392809b2SOleg Nesterov * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it 1307392809b2SOleg Nesterov * without an RCU grace period, see __lock_task_sighand().
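 *
 * (Rough sketch of the SLAB_DESTROY_BY_RCU rule: only the backing
 * slab page has its freeing RCU-deferred, individual sighand objects
 * may be reused immediately, so a lockless reader must recheck, after
 * taking siglock, that it is still looking at the task's current
 * sighand.)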
1308392809b2SOleg Nesterov */ 1309c81addc9SOleg Nesterov kmem_cache_free(sighand_cachep, sighand); 1310c81addc9SOleg Nesterov } 1311d80e731eSOleg Nesterov } 1312c81addc9SOleg Nesterov 1313b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 1314f06febc9SFrank Mayhar /* 1315f06febc9SFrank Mayhar * Initialize POSIX timer handling for a thread group. 1316f06febc9SFrank Mayhar */ 1317f06febc9SFrank Mayhar static void posix_cpu_timers_init_group(struct signal_struct *sig) 1318f06febc9SFrank Mayhar { 131978d7d407SJiri Slaby unsigned long cpu_limit; 132078d7d407SJiri Slaby 1321316c1608SJason Low cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); 132278d7d407SJiri Slaby if (cpu_limit != RLIM_INFINITY) { 1323ebd7e7fcSFrederic Weisbecker sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC; 1324d5c373ebSJason Low sig->cputimer.running = true; 13256279a751SOleg Nesterov } 13266279a751SOleg Nesterov 1327f06febc9SFrank Mayhar /* The timer lists. */ 1328f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[0]); 1329f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[1]); 1330f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[2]); 1331f06febc9SFrank Mayhar } 1332b18b6a9cSNicolas Pitre #else 1333b18b6a9cSNicolas Pitre static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { } 1334b18b6a9cSNicolas Pitre #endif 1335f06febc9SFrank Mayhar 1336a39bc516SAlexey Dobriyan static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) 13371da177e4SLinus Torvalds { 13381da177e4SLinus Torvalds struct signal_struct *sig; 13391da177e4SLinus Torvalds 13404ab6c083SOleg Nesterov if (clone_flags & CLONE_THREAD) 1341490dea45SPeter Zijlstra return 0; 13426279a751SOleg Nesterov 1343a56704efSVeaceslav Falico sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); 13441da177e4SLinus Torvalds tsk->signal = sig; 13451da177e4SLinus Torvalds if (!sig) 13461da177e4SLinus Torvalds return -ENOMEM; 13471da177e4SLinus Torvalds 1348b3ac022cSOleg Nesterov sig->nr_threads = 1; 13491da177e4SLinus Torvalds atomic_set(&sig->live, 1); 1350b3ac022cSOleg Nesterov atomic_set(&sig->sigcnt, 1); 13510c740d0aSOleg Nesterov 13520c740d0aSOleg Nesterov /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ 13530c740d0aSOleg Nesterov sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); 13540c740d0aSOleg Nesterov tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); 13550c740d0aSOleg Nesterov 13561da177e4SLinus Torvalds init_waitqueue_head(&sig->wait_chldexit); 1357db51aeccSOleg Nesterov sig->curr_target = tsk; 13581da177e4SLinus Torvalds init_sigpending(&sig->shared_pending); 1359e78c3496SRik van Riel seqlock_init(&sig->stats_lock); 13609d7fb042SPeter Zijlstra prev_cputime_init(&sig->prev_cputime); 13611da177e4SLinus Torvalds 1362baa73d9eSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 1363b18b6a9cSNicolas Pitre INIT_LIST_HEAD(&sig->posix_timers); 1364c9cb2e3dSThomas Gleixner hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 13651da177e4SLinus Torvalds sig->real_timer.function = it_real_fn; 1366baa73d9eSNicolas Pitre #endif 13671da177e4SLinus Torvalds 13681da177e4SLinus Torvalds task_lock(current->group_leader); 13691da177e4SLinus Torvalds memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); 13701da177e4SLinus Torvalds task_unlock(current->group_leader); 13711da177e4SLinus Torvalds 13726279a751SOleg Nesterov posix_cpu_timers_init_group(sig); 13736279a751SOleg Nesterov 1374522ed776SMiloslav Trmac tty_audit_fork(sig); 13755091faa4SMike Galbraith 
sched_autogroup_fork(sig); 1376522ed776SMiloslav Trmac 1377a63d83f4SDavid Rientjes sig->oom_score_adj = current->signal->oom_score_adj; 1378dabb16f6SMandeep Singh Baines sig->oom_score_adj_min = current->signal->oom_score_adj_min; 137928b83c51SKOSAKI Motohiro 13809b1bf12dSKOSAKI Motohiro mutex_init(&sig->cred_guard_mutex); 13819b1bf12dSKOSAKI Motohiro 13821da177e4SLinus Torvalds return 0; 13831da177e4SLinus Torvalds } 13841da177e4SLinus Torvalds 1385dbd95212SKees Cook static void copy_seccomp(struct task_struct *p) 1386dbd95212SKees Cook { 1387dbd95212SKees Cook #ifdef CONFIG_SECCOMP 1388dbd95212SKees Cook /* 1389dbd95212SKees Cook * Must be called with sighand->lock held, which is common to 1390dbd95212SKees Cook * all threads in the group. Holding cred_guard_mutex is not 1391dbd95212SKees Cook * needed because this new task is not yet running and cannot 1392dbd95212SKees Cook * be racing exec. 1393dbd95212SKees Cook */ 139469f6a34bSGuenter Roeck assert_spin_locked(&current->sighand->siglock); 1395dbd95212SKees Cook 1396dbd95212SKees Cook /* Ref-count the new filter user, and assign it. */ 1397dbd95212SKees Cook get_seccomp_filter(current); 1398dbd95212SKees Cook p->seccomp = current->seccomp; 1399dbd95212SKees Cook 1400dbd95212SKees Cook /* 1401dbd95212SKees Cook * Explicitly enable no_new_privs here in case it got set 1402dbd95212SKees Cook * between the task_struct being duplicated and holding the 1403dbd95212SKees Cook * sighand lock. The seccomp state and nnp must be in sync. 1404dbd95212SKees Cook */ 1405dbd95212SKees Cook if (task_no_new_privs(current)) 1406dbd95212SKees Cook task_set_no_new_privs(p); 1407dbd95212SKees Cook 1408dbd95212SKees Cook /* 1409dbd95212SKees Cook * If the parent gained a seccomp mode after the thread flags were 1410dbd95212SKees Cook * copied but before we took the sighand lock, we have to manually 1411dbd95212SKees Cook * enable the seccomp thread flag here. 1412dbd95212SKees Cook */ 1413dbd95212SKees Cook if (p->seccomp.mode != SECCOMP_MODE_DISABLED) 1414dbd95212SKees Cook set_tsk_thread_flag(p, TIF_SECCOMP); 1415dbd95212SKees Cook #endif 1416dbd95212SKees Cook } 1417dbd95212SKees Cook 141817da2bd9SHeiko Carstens SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) 14191da177e4SLinus Torvalds { 14201da177e4SLinus Torvalds current->clear_child_tid = tidptr; 14211da177e4SLinus Torvalds 1422b488893aSPavel Emelyanov return task_pid_vnr(current); 14231da177e4SLinus Torvalds } 14241da177e4SLinus Torvalds 1425a39bc516SAlexey Dobriyan static void rt_mutex_init_task(struct task_struct *p) 142623f78d4aSIngo Molnar { 14271d615482SThomas Gleixner raw_spin_lock_init(&p->pi_lock); 1428e29e175bSZilvinas Valinskas #ifdef CONFIG_RT_MUTEXES 1429fb00aca4SPeter Zijlstra p->pi_waiters = RB_ROOT; 1430fb00aca4SPeter Zijlstra p->pi_waiters_leftmost = NULL; 143123f78d4aSIngo Molnar p->pi_blocked_on = NULL; 143223f78d4aSIngo Molnar #endif 143323f78d4aSIngo Molnar } 143423f78d4aSIngo Molnar 1435b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 14361da177e4SLinus Torvalds /* 1437f06febc9SFrank Mayhar * Initialize POSIX timer handling for a single task.
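 *
 * (Orientation note: the cpu_timers[] lists set up below are indexed
 * by posix cpu clock type, 0 for CPUCLOCK_PROF, 1 for CPUCLOCK_VIRT
 * and 2 for CPUCLOCK_SCHED, matching the prof_exp, virt_exp and
 * sched_exp expiry fields.)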
1438f06febc9SFrank Mayhar */ 1439f06febc9SFrank Mayhar static void posix_cpu_timers_init(struct task_struct *tsk) 1440f06febc9SFrank Mayhar { 144164861634SMartin Schwidefsky tsk->cputime_expires.prof_exp = 0; 144264861634SMartin Schwidefsky tsk->cputime_expires.virt_exp = 0; 1443f06febc9SFrank Mayhar tsk->cputime_expires.sched_exp = 0; 1444f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[0]); 1445f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[1]); 1446f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[2]); 1447f06febc9SFrank Mayhar } 1448b18b6a9cSNicolas Pitre #else 1449b18b6a9cSNicolas Pitre static inline void posix_cpu_timers_init(struct task_struct *tsk) { } 1450b18b6a9cSNicolas Pitre #endif 1451f06febc9SFrank Mayhar 145281907739SOleg Nesterov static inline void 145381907739SOleg Nesterov init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) 145481907739SOleg Nesterov { 145581907739SOleg Nesterov task->pids[type].pid = pid; 145681907739SOleg Nesterov } 145781907739SOleg Nesterov 1458f06febc9SFrank Mayhar /* 14591da177e4SLinus Torvalds * This creates a new process as a copy of the old one, 14601da177e4SLinus Torvalds * but does not actually start it yet. 14611da177e4SLinus Torvalds * 14621da177e4SLinus Torvalds * It copies the registers, and all the appropriate 14631da177e4SLinus Torvalds * parts of the process environment (as per the clone 14641da177e4SLinus Torvalds * flags). The actual kick-off is left to the caller. 14651da177e4SLinus Torvalds */ 14660766f788SEmese Revfy static __latent_entropy struct task_struct *copy_process( 14670766f788SEmese Revfy unsigned long clone_flags, 14681da177e4SLinus Torvalds unsigned long stack_start, 14691da177e4SLinus Torvalds unsigned long stack_size, 14701da177e4SLinus Torvalds int __user *child_tidptr, 147109a05394SRoland McGrath struct pid *pid, 14723033f14aSJosh Triplett int trace, 1473725fc629SAndi Kleen unsigned long tls, 1474725fc629SAndi Kleen int node) 14751da177e4SLinus Torvalds { 14761da177e4SLinus Torvalds int retval; 1477a24efe62SMariusz Kozlowski struct task_struct *p; 14781da177e4SLinus Torvalds 14791da177e4SLinus Torvalds if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) 14801da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 14811da177e4SLinus Torvalds 1482e66eded8SEric W. Biederman if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) 1483e66eded8SEric W. Biederman return ERR_PTR(-EINVAL); 1484e66eded8SEric W. Biederman 14851da177e4SLinus Torvalds /* 14861da177e4SLinus Torvalds * Thread groups must share signals as well, and detached threads 14871da177e4SLinus Torvalds * can only be started up within the thread group. 14881da177e4SLinus Torvalds */ 14891da177e4SLinus Torvalds if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) 14901da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 14911da177e4SLinus Torvalds 14921da177e4SLinus Torvalds /* 14931da177e4SLinus Torvalds * Shared signal handlers imply shared VM. By way of the above, 14941da177e4SLinus Torvalds * thread groups also imply shared VM. Blocking this case allows 14951da177e4SLinus Torvalds * for various simplifications in other code. 
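 *
 * (Illustrative flag algebra implied by these checks:
 *	CLONE_THREAD  requires CLONE_SIGHAND,
 *	CLONE_SIGHAND requires CLONE_VM,
 * so a conventional thread creation passes at least
 * CLONE_VM | CLONE_SIGHAND | CLONE_THREAD.)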
14961da177e4SLinus Torvalds */ 14971da177e4SLinus Torvalds if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) 14981da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 14991da177e4SLinus Torvalds 1500123be07bSSukadev Bhattiprolu /* 1501123be07bSSukadev Bhattiprolu * Siblings of global init remain as zombies on exit since they are 1502123be07bSSukadev Bhattiprolu * not reaped by their parent (swapper). To solve this and to avoid 1503123be07bSSukadev Bhattiprolu * multi-rooted process trees, prevent global and container-inits 1504123be07bSSukadev Bhattiprolu * from creating siblings. 1505123be07bSSukadev Bhattiprolu */ 1506123be07bSSukadev Bhattiprolu if ((clone_flags & CLONE_PARENT) && 1507123be07bSSukadev Bhattiprolu current->signal->flags & SIGNAL_UNKILLABLE) 1508123be07bSSukadev Bhattiprolu return ERR_PTR(-EINVAL); 1509123be07bSSukadev Bhattiprolu 15108382fcacSEric W. Biederman /* 151140a0d32dSOleg Nesterov * If the new process will be in a different pid or user namespace 1512faf00da5SEric W. Biederman * do not allow it to share a thread group with the forking task. 15138382fcacSEric W. Biederman */ 1514faf00da5SEric W. Biederman if (clone_flags & CLONE_THREAD) { 151540a0d32dSOleg Nesterov if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || 151640a0d32dSOleg Nesterov (task_active_pid_ns(current) != 1517c2b1df2eSAndy Lutomirski current->nsproxy->pid_ns_for_children)) 15188382fcacSEric W. Biederman return ERR_PTR(-EINVAL); 151940a0d32dSOleg Nesterov } 15208382fcacSEric W. Biederman 15211da177e4SLinus Torvalds retval = security_task_create(clone_flags); 15221da177e4SLinus Torvalds if (retval) 15231da177e4SLinus Torvalds goto fork_out; 15241da177e4SLinus Torvalds 15251da177e4SLinus Torvalds retval = -ENOMEM; 1526725fc629SAndi Kleen p = dup_task_struct(current, node); 15271da177e4SLinus Torvalds if (!p) 15281da177e4SLinus Torvalds goto fork_out; 15291da177e4SLinus Torvalds 1530f7e8b616SSteven Rostedt ftrace_graph_init_task(p); 1531f7e8b616SSteven Rostedt 1532bea493a0SPeter Zijlstra rt_mutex_init_task(p); 1533bea493a0SPeter Zijlstra 1534d12c1a37SIngo Molnar #ifdef CONFIG_PROVE_LOCKING 1535de30a2b3SIngo Molnar DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); 1536de30a2b3SIngo Molnar DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 1537de30a2b3SIngo Molnar #endif 15381da177e4SLinus Torvalds retval = -EAGAIN; 15393b11a1deSDavid Howells if (atomic_read(&p->real_cred->user->processes) >= 154078d7d407SJiri Slaby task_rlimit(p, RLIMIT_NPROC)) { 1541b57922b6SEric Paris if (p->real_cred->user != INIT_USER && 1542b57922b6SEric Paris !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) 15431da177e4SLinus Torvalds goto bad_fork_free; 15441da177e4SLinus Torvalds } 154572fa5997SVasiliy Kulikov current->flags &= ~PF_NPROC_EXCEEDED; 15461da177e4SLinus Torvalds 1547f1752eecSDavid Howells retval = copy_creds(p, clone_flags); 1548f1752eecSDavid Howells if (retval < 0) 1549f1752eecSDavid Howells goto bad_fork_free; 15501da177e4SLinus Torvalds 15511da177e4SLinus Torvalds /* 15521da177e4SLinus Torvalds * If multiple threads are within copy_process(), then this check 15531da177e4SLinus Torvalds * triggers too late. This doesn't hurt, the check is only there 15541da177e4SLinus Torvalds * to stop root fork bombs. 
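 *
 * (Orientation note: max_threads is exposed as the kernel.threads-max
 * sysctl, i.e. /proc/sys/kernel/threads-max, so this ceiling can be
 * raised or lowered at runtime.)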
15551da177e4SLinus Torvalds */ 155604ec93feSLi Zefan retval = -EAGAIN; 15571da177e4SLinus Torvalds if (nr_threads >= max_threads) 15581da177e4SLinus Torvalds goto bad_fork_cleanup_count; 15591da177e4SLinus Torvalds 1560ca74e92bSShailabh Nagar delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 1561c1de45caSPeter Zijlstra p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE); 1562514ddb44SDavid Rientjes p->flags |= PF_FORKNOEXEC; 15631da177e4SLinus Torvalds INIT_LIST_HEAD(&p->children); 15641da177e4SLinus Torvalds INIT_LIST_HEAD(&p->sibling); 1565f41d911fSPaul E. McKenney rcu_copy_process(p); 15661da177e4SLinus Torvalds p->vfork_done = NULL; 15671da177e4SLinus Torvalds spin_lock_init(&p->alloc_lock); 15681da177e4SLinus Torvalds 15691da177e4SLinus Torvalds init_sigpending(&p->pending); 15701da177e4SLinus Torvalds 157164861634SMartin Schwidefsky p->utime = p->stime = p->gtime = 0; 157240565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 157364861634SMartin Schwidefsky p->utimescaled = p->stimescaled = 0; 157440565b5aSStanislaw Gruszka #endif 15759d7fb042SPeter Zijlstra prev_cputime_init(&p->prev_cputime); 15769d7fb042SPeter Zijlstra 15776a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1578b7ce2277SFrederic Weisbecker seqcount_init(&p->vtime_seqcount); 15796a61671bSFrederic Weisbecker p->vtime_snap = 0; 15807098c1eaSFrederic Weisbecker p->vtime_snap_whence = VTIME_INACTIVE; 15816a61671bSFrederic Weisbecker #endif 15826a61671bSFrederic Weisbecker 1583a3a2e76cSKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING) 1584a3a2e76cSKAMEZAWA Hiroyuki memset(&p->rss_stat, 0, sizeof(p->rss_stat)); 1585a3a2e76cSKAMEZAWA Hiroyuki #endif 1586172ba844SBalbir Singh 15876976675dSArjan van de Ven p->default_timer_slack_ns = current->timer_slack_ns; 15886976675dSArjan van de Ven 15895995477aSAndrea Righi task_io_accounting_init(&p->ioac); 15901da177e4SLinus Torvalds acct_clear_integrals(p); 15911da177e4SLinus Torvalds 1592f06febc9SFrank Mayhar posix_cpu_timers_init(p); 15931da177e4SLinus Torvalds 1594ccbf62d8SThomas Gleixner p->start_time = ktime_get_ns(); 159557e0be04SThomas Gleixner p->real_start_time = ktime_get_boot_ns(); 15961da177e4SLinus Torvalds p->io_context = NULL; 15971da177e4SLinus Torvalds p->audit_context = NULL; 1598b4f48b63SPaul Menage cgroup_fork(p); 15991da177e4SLinus Torvalds #ifdef CONFIG_NUMA 1600846a16bfSLee Schermerhorn p->mempolicy = mpol_dup(p->mempolicy); 16011da177e4SLinus Torvalds if (IS_ERR(p->mempolicy)) { 16021da177e4SLinus Torvalds retval = PTR_ERR(p->mempolicy); 16031da177e4SLinus Torvalds p->mempolicy = NULL; 1604e8604cb4SLi Zefan goto bad_fork_cleanup_threadgroup_lock; 16051da177e4SLinus Torvalds } 16061da177e4SLinus Torvalds #endif 1607778d3b0fSMichal Hocko #ifdef CONFIG_CPUSETS 1608778d3b0fSMichal Hocko p->cpuset_mem_spread_rotor = NUMA_NO_NODE; 1609778d3b0fSMichal Hocko p->cpuset_slab_spread_rotor = NUMA_NO_NODE; 1610cc9a6c87SMel Gorman seqcount_init(&p->mems_allowed_seq); 1611778d3b0fSMichal Hocko #endif 1612de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS 1613de30a2b3SIngo Molnar p->irq_events = 0; 1614de30a2b3SIngo Molnar p->hardirqs_enabled = 0; 1615de30a2b3SIngo Molnar p->hardirq_enable_ip = 0; 1616de30a2b3SIngo Molnar p->hardirq_enable_event = 0; 1617de30a2b3SIngo Molnar p->hardirq_disable_ip = _THIS_IP_; 1618de30a2b3SIngo Molnar p->hardirq_disable_event = 0; 1619de30a2b3SIngo Molnar p->softirqs_enabled = 1; 1620de30a2b3SIngo Molnar p->softirq_enable_ip = _THIS_IP_; 1621de30a2b3SIngo Molnar p->softirq_enable_event = 0; 
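/* (Orientation note: the TRACE_IRQFLAGS fields in this block give the new task an empty irq-event history; softirqs_enabled starts at 1 above since a fresh task begins life with softirqs enabled.) */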
1622de30a2b3SIngo Molnar p->softirq_disable_ip = 0; 1623de30a2b3SIngo Molnar p->softirq_disable_event = 0; 1624de30a2b3SIngo Molnar p->hardirq_context = 0; 1625de30a2b3SIngo Molnar p->softirq_context = 0; 1626de30a2b3SIngo Molnar #endif 16278bcbde54SDavid Hildenbrand 16288bcbde54SDavid Hildenbrand p->pagefault_disabled = 0; 16298bcbde54SDavid Hildenbrand 1630fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP 1631fbb9ce95SIngo Molnar p->lockdep_depth = 0; /* no locks held yet */ 1632fbb9ce95SIngo Molnar p->curr_chain_key = 0; 1633fbb9ce95SIngo Molnar p->lockdep_recursion = 0; 1634fbb9ce95SIngo Molnar #endif 16351da177e4SLinus Torvalds 1636408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES 1637408894eeSIngo Molnar p->blocked_on = NULL; /* not blocked yet */ 1638408894eeSIngo Molnar #endif 1639cafe5635SKent Overstreet #ifdef CONFIG_BCACHE 1640cafe5635SKent Overstreet p->sequential_io = 0; 1641cafe5635SKent Overstreet p->sequential_io_avg = 0; 1642cafe5635SKent Overstreet #endif 16430f481406SMarkus Metzger 16443c90e6e9SSrivatsa Vaddagiri /* Perform scheduler related setup. Assign this task to a CPU. */ 1645aab03e05SDario Faggioli retval = sched_fork(clone_flags, p); 1646aab03e05SDario Faggioli if (retval) 1647aab03e05SDario Faggioli goto bad_fork_cleanup_policy; 16486ab423e0SPeter Zijlstra 1649cdd6c482SIngo Molnar retval = perf_event_init_task(p); 16506ab423e0SPeter Zijlstra if (retval) 16516ab423e0SPeter Zijlstra goto bad_fork_cleanup_policy; 1652fb0a685cSDaniel Rebelo de Oliveira retval = audit_alloc(p); 1653fb0a685cSDaniel Rebelo de Oliveira if (retval) 16546c72e350SPeter Zijlstra goto bad_fork_cleanup_perf; 16551da177e4SLinus Torvalds /* copy all the process information */ 1656ab602f79SJack Miller shm_init_task(p); 1657fb0a685cSDaniel Rebelo de Oliveira retval = copy_semundo(clone_flags, p); 1658fb0a685cSDaniel Rebelo de Oliveira if (retval) 16591da177e4SLinus Torvalds goto bad_fork_cleanup_audit; 1660fb0a685cSDaniel Rebelo de Oliveira retval = copy_files(clone_flags, p); 1661fb0a685cSDaniel Rebelo de Oliveira if (retval) 16621da177e4SLinus Torvalds goto bad_fork_cleanup_semundo; 1663fb0a685cSDaniel Rebelo de Oliveira retval = copy_fs(clone_flags, p); 1664fb0a685cSDaniel Rebelo de Oliveira if (retval) 16651da177e4SLinus Torvalds goto bad_fork_cleanup_files; 1666fb0a685cSDaniel Rebelo de Oliveira retval = copy_sighand(clone_flags, p); 1667fb0a685cSDaniel Rebelo de Oliveira if (retval) 16681da177e4SLinus Torvalds goto bad_fork_cleanup_fs; 1669fb0a685cSDaniel Rebelo de Oliveira retval = copy_signal(clone_flags, p); 1670fb0a685cSDaniel Rebelo de Oliveira if (retval) 16711da177e4SLinus Torvalds goto bad_fork_cleanup_sighand; 1672fb0a685cSDaniel Rebelo de Oliveira retval = copy_mm(clone_flags, p); 1673fb0a685cSDaniel Rebelo de Oliveira if (retval) 16741da177e4SLinus Torvalds goto bad_fork_cleanup_signal; 1675fb0a685cSDaniel Rebelo de Oliveira retval = copy_namespaces(clone_flags, p); 1676fb0a685cSDaniel Rebelo de Oliveira if (retval) 1677d84f4f99SDavid Howells goto bad_fork_cleanup_mm; 1678fb0a685cSDaniel Rebelo de Oliveira retval = copy_io(clone_flags, p); 1679fb0a685cSDaniel Rebelo de Oliveira if (retval) 1680fd0928dfSJens Axboe goto bad_fork_cleanup_namespaces; 16813033f14aSJosh Triplett retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls); 16821da177e4SLinus Torvalds if (retval) 1683fd0928dfSJens Axboe goto bad_fork_cleanup_io; 16841da177e4SLinus Torvalds 1685425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) { 1686c2b1df2eSAndy Lutomirski pid = 
alloc_pid(p->nsproxy->pid_ns_for_children); 168735f71bc0SMichal Hocko if (IS_ERR(pid)) { 168835f71bc0SMichal Hocko retval = PTR_ERR(pid); 16890740aa5fSJiri Slaby goto bad_fork_cleanup_thread; 1690425fb2b4SPavel Emelyanov } 169135f71bc0SMichal Hocko } 1692425fb2b4SPavel Emelyanov 16931da177e4SLinus Torvalds p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; 16941da177e4SLinus Torvalds /* 16951da177e4SLinus Torvalds * Clear TID on mm_release()? 16961da177e4SLinus Torvalds */ 16971da177e4SLinus Torvalds p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; 169873c10101SJens Axboe #ifdef CONFIG_BLOCK 169973c10101SJens Axboe p->plug = NULL; 170073c10101SJens Axboe #endif 170142b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX 17028f17d3a5SIngo Molnar p->robust_list = NULL; 17038f17d3a5SIngo Molnar #ifdef CONFIG_COMPAT 17048f17d3a5SIngo Molnar p->compat_robust_list = NULL; 17058f17d3a5SIngo Molnar #endif 1706c87e2837SIngo Molnar INIT_LIST_HEAD(&p->pi_state_list); 1707c87e2837SIngo Molnar p->pi_state_cache = NULL; 170842b2dd0aSAlexey Dobriyan #endif 17091da177e4SLinus Torvalds /* 1710f9a3879aSGOTO Masanori * sigaltstack should be cleared when sharing the same VM 1711f9a3879aSGOTO Masanori */ 1712f9a3879aSGOTO Masanori if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) 17132a742138SStas Sergeev sas_ss_reset(p); 1714f9a3879aSGOTO Masanori 1715f9a3879aSGOTO Masanori /* 17166580807dSOleg Nesterov * Syscall tracing and stepping should be turned off in the 17176580807dSOleg Nesterov * child regardless of CLONE_PTRACE. 17181da177e4SLinus Torvalds */ 17196580807dSOleg Nesterov user_disable_single_step(p); 17201da177e4SLinus Torvalds clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); 1721ed75e8d5SLaurent Vivier #ifdef TIF_SYSCALL_EMU 1722ed75e8d5SLaurent Vivier clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); 1723ed75e8d5SLaurent Vivier #endif 17249745512cSArjan van de Ven clear_all_latency_tracing(p); 17251da177e4SLinus Torvalds 17261da177e4SLinus Torvalds /* ok, now we should be set up.. */ 172718c830dfSOleg Nesterov p->pid = pid_nr(pid); 172818c830dfSOleg Nesterov if (clone_flags & CLONE_THREAD) { 17295f8aadd8SOleg Nesterov p->exit_signal = -1; 173018c830dfSOleg Nesterov p->group_leader = current->group_leader; 173118c830dfSOleg Nesterov p->tgid = current->tgid; 173218c830dfSOleg Nesterov } else { 173318c830dfSOleg Nesterov if (clone_flags & CLONE_PARENT) 17345f8aadd8SOleg Nesterov p->exit_signal = current->group_leader->exit_signal; 17355f8aadd8SOleg Nesterov else 17365f8aadd8SOleg Nesterov p->exit_signal = (clone_flags & CSIGNAL); 173718c830dfSOleg Nesterov p->group_leader = p; 173818c830dfSOleg Nesterov p->tgid = p->pid; 173918c830dfSOleg Nesterov } 17405f8aadd8SOleg Nesterov 17419d823e8fSWu Fengguang p->nr_dirtied = 0; 17429d823e8fSWu Fengguang p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); 174383712358SWu Fengguang p->dirty_paused_when = 0; 17449d823e8fSWu Fengguang 1745bb8cbbfeSOleg Nesterov p->pdeath_signal = 0; 174647e65328SOleg Nesterov INIT_LIST_HEAD(&p->thread_group); 1747158e1645SAl Viro p->task_works = NULL; 17481da177e4SLinus Torvalds 1749568ac888SBalbir Singh threadgroup_change_begin(current); 175018c830dfSOleg Nesterov /* 17517e47682eSAleksa Sarai * Ensure that the cgroup subsystem policies allow the new process to be 17527e47682eSAleksa Sarai * forked. 
It should be noted that the new process's css_set can be changed 17537e47682eSAleksa Sarai * between here and cgroup_post_fork() if an organisation operation is in 17547e47682eSAleksa Sarai * progress. 17557e47682eSAleksa Sarai */ 1756b53202e6SOleg Nesterov retval = cgroup_can_fork(p); 17577e47682eSAleksa Sarai if (retval) 17587e47682eSAleksa Sarai goto bad_fork_free_pid; 17597e47682eSAleksa Sarai 17607e47682eSAleksa Sarai /* 176118c830dfSOleg Nesterov * Make it visible to the rest of the system, but don't wake it up yet. 176218c830dfSOleg Nesterov * Need tasklist lock for parent etc handling! 176318c830dfSOleg Nesterov */ 17641da177e4SLinus Torvalds write_lock_irq(&tasklist_lock); 17651da177e4SLinus Torvalds 17661da177e4SLinus Torvalds /* CLONE_PARENT re-uses the old parent */ 17672d5516cbSOleg Nesterov if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { 17681da177e4SLinus Torvalds p->real_parent = current->real_parent; 17692d5516cbSOleg Nesterov p->parent_exec_id = current->parent_exec_id; 17702d5516cbSOleg Nesterov } else { 17711da177e4SLinus Torvalds p->real_parent = current; 17722d5516cbSOleg Nesterov p->parent_exec_id = current->self_exec_id; 17732d5516cbSOleg Nesterov } 17741da177e4SLinus Torvalds 17751da177e4SLinus Torvalds spin_lock(&current->sighand->siglock); 17764a2c7a78SOleg Nesterov 17774a2c7a78SOleg Nesterov /* 1778dbd95212SKees Cook * Copy seccomp details explicitly here, in case they were changed 1779dbd95212SKees Cook * before holding sighand lock. 1780dbd95212SKees Cook */ 1781dbd95212SKees Cook copy_seccomp(p); 1782dbd95212SKees Cook 1783dbd95212SKees Cook /* 17844a2c7a78SOleg Nesterov * Process group and session signals need to be delivered to just the 17854a2c7a78SOleg Nesterov * parent before the fork or both the parent and the child after the 17864a2c7a78SOleg Nesterov * fork. Restart if a signal comes in before we add the new process to 17874a2c7a78SOleg Nesterov * its process group. 17884a2c7a78SOleg Nesterov * A fatal signal pending means that current will exit, so the new 17894a2c7a78SOleg Nesterov * thread can't slip out of an OOM kill (or normal SIGKILL). 17904a2c7a78SOleg Nesterov */ 17914a2c7a78SOleg Nesterov recalc_sigpending(); 17924a2c7a78SOleg Nesterov if (signal_pending(current)) { 17934a2c7a78SOleg Nesterov spin_unlock(&current->sighand->siglock); 17944a2c7a78SOleg Nesterov write_unlock_irq(&tasklist_lock); 17954a2c7a78SOleg Nesterov retval = -ERESTARTNOINTR; 17967e47682eSAleksa Sarai goto bad_fork_cancel_cgroup; 17974a2c7a78SOleg Nesterov } 17984a2c7a78SOleg Nesterov 179973b9ebfeSOleg Nesterov if (likely(p->pid)) { 18004b9d33e6STejun Heo ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); 18011da177e4SLinus Torvalds 180281907739SOleg Nesterov init_task_pid(p, PIDTYPE_PID, pid); 18031da177e4SLinus Torvalds if (thread_group_leader(p)) { 180481907739SOleg Nesterov init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); 180581907739SOleg Nesterov init_task_pid(p, PIDTYPE_SID, task_session(current)); 180681907739SOleg Nesterov 18071c4042c2SEric W. Biederman if (is_child_reaper(pid)) { 180817cf22c3SEric W. Biederman ns_of_pid(pid)->child_reaper = p; 18091c4042c2SEric W. Biederman p->signal->flags |= SIGNAL_UNKILLABLE; 18101c4042c2SEric W. Biederman } 18115cd17569SEric W.
Biederman 1812fea9d175SOleg Nesterov p->signal->leader_pid = pid; 18139c9f4dedSAlan Cox p->signal->tty = tty_kref_get(current->signal->tty); 1814749860ceSPavel Tikhomirov /* 1815749860ceSPavel Tikhomirov * Inherit the has_child_subreaper flag under the same 1816749860ceSPavel Tikhomirov * tasklist_lock hold that adds the child to the process tree, 1817749860ceSPavel Tikhomirov * for the propagate_has_child_subreaper optimization. 1818749860ceSPavel Tikhomirov */ 1819749860ceSPavel Tikhomirov p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || 1820749860ceSPavel Tikhomirov p->real_parent->signal->is_child_subreaper; 18219cd80bbbSOleg Nesterov list_add_tail(&p->sibling, &p->real_parent->children); 18225e85d4abSEric W. Biederman list_add_tail_rcu(&p->tasks, &init_task.tasks); 182381907739SOleg Nesterov attach_pid(p, PIDTYPE_PGID); 182481907739SOleg Nesterov attach_pid(p, PIDTYPE_SID); 1825909ea964SChristoph Lameter __this_cpu_inc(process_counts); 182680628ca0SOleg Nesterov } else { 182780628ca0SOleg Nesterov current->signal->nr_threads++; 182880628ca0SOleg Nesterov atomic_inc(&current->signal->live); 182980628ca0SOleg Nesterov atomic_inc(&current->signal->sigcnt); 183080628ca0SOleg Nesterov list_add_tail_rcu(&p->thread_group, 183180628ca0SOleg Nesterov &p->group_leader->thread_group); 18320c740d0aSOleg Nesterov list_add_tail_rcu(&p->thread_node, 18330c740d0aSOleg Nesterov &p->signal->thread_head); 18341da177e4SLinus Torvalds } 183581907739SOleg Nesterov attach_pid(p, PIDTYPE_PID); 18361da177e4SLinus Torvalds nr_threads++; 183773b9ebfeSOleg Nesterov } 183873b9ebfeSOleg Nesterov 18391da177e4SLinus Torvalds total_forks++; 18403f17da69SOleg Nesterov spin_unlock(&current->sighand->siglock); 18414af4206bSOleg Nesterov syscall_tracepoint_update(p); 18421da177e4SLinus Torvalds write_unlock_irq(&tasklist_lock); 18434af4206bSOleg Nesterov 1844c13cf856SAndrew Morton proc_fork_connector(p); 1845b53202e6SOleg Nesterov cgroup_post_fork(p); 1846257058aeSTejun Heo threadgroup_change_end(current); 1847cdd6c482SIngo Molnar perf_event_fork(p); 184843d2b113SKAMEZAWA Hiroyuki 184943d2b113SKAMEZAWA Hiroyuki trace_task_newtask(p, clone_flags); 18503ab67966SOleg Nesterov uprobe_copy_process(p, clone_flags); 185143d2b113SKAMEZAWA Hiroyuki 18521da177e4SLinus Torvalds return p; 18531da177e4SLinus Torvalds 18547e47682eSAleksa Sarai bad_fork_cancel_cgroup: 1855b53202e6SOleg Nesterov cgroup_cancel_fork(p); 1856425fb2b4SPavel Emelyanov bad_fork_free_pid: 1857568ac888SBalbir Singh threadgroup_change_end(current); 1858425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) 1859425fb2b4SPavel Emelyanov free_pid(pid); 18600740aa5fSJiri Slaby bad_fork_cleanup_thread: 18610740aa5fSJiri Slaby exit_thread(p); 1862fd0928dfSJens Axboe bad_fork_cleanup_io: 1863b69f2292SLouis Rilling if (p->io_context) 1864b69f2292SLouis Rilling exit_io_context(p); 1865ab516013SSerge E.
Hallyn bad_fork_cleanup_namespaces: 1866444f378bSLinus Torvalds exit_task_namespaces(p); 18671da177e4SLinus Torvalds bad_fork_cleanup_mm: 1868c9f01245SDavid Rientjes if (p->mm) 18691da177e4SLinus Torvalds mmput(p->mm); 18701da177e4SLinus Torvalds bad_fork_cleanup_signal: 18714ab6c083SOleg Nesterov if (!(clone_flags & CLONE_THREAD)) 18721c5354deSMike Galbraith free_signal_struct(p->signal); 18731da177e4SLinus Torvalds bad_fork_cleanup_sighand: 1874a7e5328aSOleg Nesterov __cleanup_sighand(p->sighand); 18751da177e4SLinus Torvalds bad_fork_cleanup_fs: 18761da177e4SLinus Torvalds exit_fs(p); /* blocking */ 18771da177e4SLinus Torvalds bad_fork_cleanup_files: 18781da177e4SLinus Torvalds exit_files(p); /* blocking */ 18791da177e4SLinus Torvalds bad_fork_cleanup_semundo: 18801da177e4SLinus Torvalds exit_sem(p); 18811da177e4SLinus Torvalds bad_fork_cleanup_audit: 18821da177e4SLinus Torvalds audit_free(p); 18836c72e350SPeter Zijlstra bad_fork_cleanup_perf: 1884cdd6c482SIngo Molnar perf_event_free_task(p); 18856c72e350SPeter Zijlstra bad_fork_cleanup_policy: 18861da177e4SLinus Torvalds #ifdef CONFIG_NUMA 1887f0be3d32SLee Schermerhorn mpol_put(p->mempolicy); 1888e8604cb4SLi Zefan bad_fork_cleanup_threadgroup_lock: 18891da177e4SLinus Torvalds #endif 189035df17c5SShailabh Nagar delayacct_tsk_free(p); 18911da177e4SLinus Torvalds bad_fork_cleanup_count: 1892d84f4f99SDavid Howells atomic_dec(&p->cred->user->processes); 1893e0e81739SDavid Howells exit_creds(p); 18941da177e4SLinus Torvalds bad_fork_free: 1895405c0759SAndy Lutomirski p->state = TASK_DEAD; 189668f24b08SAndy Lutomirski put_task_stack(p); 18971da177e4SLinus Torvalds free_task(p); 1898fe7d37d1SOleg Nesterov fork_out: 1899fe7d37d1SOleg Nesterov return ERR_PTR(retval); 19001da177e4SLinus Torvalds } 19011da177e4SLinus Torvalds 1902f106eee1SOleg Nesterov static inline void init_idle_pids(struct pid_link *links) 1903f106eee1SOleg Nesterov { 1904f106eee1SOleg Nesterov enum pid_type type; 1905f106eee1SOleg Nesterov 1906f106eee1SOleg Nesterov for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { 1907f106eee1SOleg Nesterov INIT_HLIST_NODE(&links[type].node); /* not really needed */ 1908f106eee1SOleg Nesterov links[type].pid = &init_struct_pid; 1909f106eee1SOleg Nesterov } 1910f106eee1SOleg Nesterov } 1911f106eee1SOleg Nesterov 19120db0628dSPaul Gortmaker struct task_struct *fork_idle(int cpu) 19131da177e4SLinus Torvalds { 191436c8b586SIngo Molnar struct task_struct *task; 1915725fc629SAndi Kleen task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0, 1916725fc629SAndi Kleen cpu_to_node(cpu)); 1917f106eee1SOleg Nesterov if (!IS_ERR(task)) { 1918f106eee1SOleg Nesterov init_idle_pids(task->pids); 19191da177e4SLinus Torvalds init_idle(task, cpu); 1920f106eee1SOleg Nesterov } 192173b9ebfeSOleg Nesterov 19221da177e4SLinus Torvalds return task; 19231da177e4SLinus Torvalds } 19241da177e4SLinus Torvalds 19251da177e4SLinus Torvalds /* 19261da177e4SLinus Torvalds * Ok, this is the main fork-routine. 19271da177e4SLinus Torvalds * 19281da177e4SLinus Torvalds * It copies the process, and if successful kick-starts 19291da177e4SLinus Torvalds * it and waits for it to finish using the VM if required. 
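 *
 * (Illustrative, per the syscall stubs further down in this file:
 * fork() arrives here as _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0) and
 * vfork() as _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 0, NULL,
 * NULL, 0).)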
19301da177e4SLinus Torvalds */ 19313033f14aSJosh Triplett long _do_fork(unsigned long clone_flags, 19321da177e4SLinus Torvalds unsigned long stack_start, 19331da177e4SLinus Torvalds unsigned long stack_size, 19341da177e4SLinus Torvalds int __user *parent_tidptr, 19353033f14aSJosh Triplett int __user *child_tidptr, 19363033f14aSJosh Triplett unsigned long tls) 19371da177e4SLinus Torvalds { 19381da177e4SLinus Torvalds struct task_struct *p; 19391da177e4SLinus Torvalds int trace = 0; 194092476d7fSEric W. Biederman long nr; 19411da177e4SLinus Torvalds 1942bdff746aSAndrew Morton /* 19434b9d33e6STejun Heo * Determine whether and which event to report to ptracer. When 19444b9d33e6STejun Heo * called from kernel_thread or CLONE_UNTRACED is explicitly 19454b9d33e6STejun Heo * requested, no event is reported; otherwise, report if the event 19464b9d33e6STejun Heo * for the type of forking is enabled. 194709a05394SRoland McGrath */ 1948e80d6661SAl Viro if (!(clone_flags & CLONE_UNTRACED)) { 19494b9d33e6STejun Heo if (clone_flags & CLONE_VFORK) 19504b9d33e6STejun Heo trace = PTRACE_EVENT_VFORK; 19514b9d33e6STejun Heo else if ((clone_flags & CSIGNAL) != SIGCHLD) 19524b9d33e6STejun Heo trace = PTRACE_EVENT_CLONE; 19534b9d33e6STejun Heo else 19544b9d33e6STejun Heo trace = PTRACE_EVENT_FORK; 19554b9d33e6STejun Heo 19564b9d33e6STejun Heo if (likely(!ptrace_event_enabled(current, trace))) 19574b9d33e6STejun Heo trace = 0; 19584b9d33e6STejun Heo } 19591da177e4SLinus Torvalds 196062e791c1SAl Viro p = copy_process(clone_flags, stack_start, stack_size, 1961725fc629SAndi Kleen child_tidptr, NULL, trace, tls, NUMA_NO_NODE); 196238addce8SEmese Revfy add_latent_entropy(); 19631da177e4SLinus Torvalds /* 19641da177e4SLinus Torvalds * Do this prior to waking up the new thread - the thread pointer 19651da177e4SLinus Torvalds * might become invalid after that point, if the thread exits quickly. 19661da177e4SLinus Torvalds */ 19671da177e4SLinus Torvalds if (!IS_ERR(p)) { 19681da177e4SLinus Torvalds struct completion vfork; 19694e52365fSMatthew Dempsky struct pid *pid; 19701da177e4SLinus Torvalds 19710a16b607SMathieu Desnoyers trace_sched_process_fork(current, p); 19720a16b607SMathieu Desnoyers 19734e52365fSMatthew Dempsky pid = get_task_pid(p, PIDTYPE_PID); 19744e52365fSMatthew Dempsky nr = pid_vnr(pid); 197530e49c26SPavel Emelyanov 197630e49c26SPavel Emelyanov if (clone_flags & CLONE_PARENT_SETTID) 197730e49c26SPavel Emelyanov put_user(nr, parent_tidptr); 1978a6f5e063SSukadev Bhattiprolu 19791da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) { 19801da177e4SLinus Torvalds p->vfork_done = &vfork; 19811da177e4SLinus Torvalds init_completion(&vfork); 1982d68b46feSOleg Nesterov get_task_struct(p); 19831da177e4SLinus Torvalds } 19841da177e4SLinus Torvalds 19853e51e3edSSamir Bellabes wake_up_new_task(p); 19861da177e4SLinus Torvalds 19874b9d33e6STejun Heo /* forking complete and child started to run, tell ptracer */ 19884b9d33e6STejun Heo if (unlikely(trace)) 19894e52365fSMatthew Dempsky ptrace_event_pid(trace, pid); 199009a05394SRoland McGrath 19911da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) { 1992d68b46feSOleg Nesterov if (!wait_for_vfork_done(p, &vfork)) 19934e52365fSMatthew Dempsky ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); 19949f59ce5dSChuck Ebbert } 19954e52365fSMatthew Dempsky 19964e52365fSMatthew Dempsky put_pid(pid); 19971da177e4SLinus Torvalds } else { 199892476d7fSEric W. Biederman nr = PTR_ERR(p); 19991da177e4SLinus Torvalds } 200092476d7fSEric W.
Biederman return nr; 20011da177e4SLinus Torvalds } 20021da177e4SLinus Torvalds 20033033f14aSJosh Triplett #ifndef CONFIG_HAVE_COPY_THREAD_TLS 20043033f14aSJosh Triplett /* For compatibility with architectures that call do_fork directly rather than 20053033f14aSJosh Triplett * using the syscall entry points below. */ 20063033f14aSJosh Triplett long do_fork(unsigned long clone_flags, 20073033f14aSJosh Triplett unsigned long stack_start, 20083033f14aSJosh Triplett unsigned long stack_size, 20093033f14aSJosh Triplett int __user *parent_tidptr, 20103033f14aSJosh Triplett int __user *child_tidptr) 20113033f14aSJosh Triplett { 20123033f14aSJosh Triplett return _do_fork(clone_flags, stack_start, stack_size, 20133033f14aSJosh Triplett parent_tidptr, child_tidptr, 0); 20143033f14aSJosh Triplett } 20153033f14aSJosh Triplett #endif 20163033f14aSJosh Triplett 20172aa3a7f8SAl Viro /* 20182aa3a7f8SAl Viro * Create a kernel thread. 20192aa3a7f8SAl Viro */ 20202aa3a7f8SAl Viro pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 20212aa3a7f8SAl Viro { 20223033f14aSJosh Triplett return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, 20233033f14aSJosh Triplett (unsigned long)arg, NULL, NULL, 0); 20242aa3a7f8SAl Viro } 20252aa3a7f8SAl Viro 2026d2125043SAl Viro #ifdef __ARCH_WANT_SYS_FORK 2027d2125043SAl Viro SYSCALL_DEFINE0(fork) 2028d2125043SAl Viro { 2029d2125043SAl Viro #ifdef CONFIG_MMU 20303033f14aSJosh Triplett return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0); 2031d2125043SAl Viro #else 2032d2125043SAl Viro /* can not support in nommu mode */ 20335d59e182SDaeseok Youn return -EINVAL; 2034d2125043SAl Viro #endif 2035d2125043SAl Viro } 2036d2125043SAl Viro #endif 2037d2125043SAl Viro 2038d2125043SAl Viro #ifdef __ARCH_WANT_SYS_VFORK 2039d2125043SAl Viro SYSCALL_DEFINE0(vfork) 2040d2125043SAl Viro { 20413033f14aSJosh Triplett return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 20423033f14aSJosh Triplett 0, NULL, NULL, 0); 2043d2125043SAl Viro } 2044d2125043SAl Viro #endif 2045d2125043SAl Viro 2046d2125043SAl Viro #ifdef __ARCH_WANT_SYS_CLONE 2047d2125043SAl Viro #ifdef CONFIG_CLONE_BACKWARDS 2048d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2049d2125043SAl Viro int __user *, parent_tidptr, 20503033f14aSJosh Triplett unsigned long, tls, 2051d2125043SAl Viro int __user *, child_tidptr) 2052d2125043SAl Viro #elif defined(CONFIG_CLONE_BACKWARDS2) 2053d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, 2054d2125043SAl Viro int __user *, parent_tidptr, 2055d2125043SAl Viro int __user *, child_tidptr, 20563033f14aSJosh Triplett unsigned long, tls) 2057dfa9771aSMichal Simek #elif defined(CONFIG_CLONE_BACKWARDS3) 2058dfa9771aSMichal Simek SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, 2059dfa9771aSMichal Simek int, stack_size, 2060dfa9771aSMichal Simek int __user *, parent_tidptr, 2061dfa9771aSMichal Simek int __user *, child_tidptr, 20623033f14aSJosh Triplett unsigned long, tls) 2063d2125043SAl Viro #else 2064d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2065d2125043SAl Viro int __user *, parent_tidptr, 2066d2125043SAl Viro int __user *, child_tidptr, 20673033f14aSJosh Triplett unsigned long, tls) 2068d2125043SAl Viro #endif 2069d2125043SAl Viro { 20703033f14aSJosh Triplett return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls); 2071d2125043SAl Viro } 2072d2125043SAl Viro #endif 2073d2125043SAl Viro 20740f1b92cbSOleg 
Nesterov void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) 20750f1b92cbSOleg Nesterov { 20760f1b92cbSOleg Nesterov struct task_struct *leader, *parent, *child; 20770f1b92cbSOleg Nesterov int res; 20780f1b92cbSOleg Nesterov 20790f1b92cbSOleg Nesterov read_lock(&tasklist_lock); 20800f1b92cbSOleg Nesterov leader = top = top->group_leader; 20810f1b92cbSOleg Nesterov down: 20820f1b92cbSOleg Nesterov for_each_thread(leader, parent) { 20830f1b92cbSOleg Nesterov list_for_each_entry(child, &parent->children, sibling) { 20840f1b92cbSOleg Nesterov res = visitor(child, data); 20850f1b92cbSOleg Nesterov if (res) { 20860f1b92cbSOleg Nesterov if (res < 0) 20870f1b92cbSOleg Nesterov goto out; 20880f1b92cbSOleg Nesterov leader = child; 20890f1b92cbSOleg Nesterov goto down; 20900f1b92cbSOleg Nesterov } 20910f1b92cbSOleg Nesterov up: 20920f1b92cbSOleg Nesterov ; 20930f1b92cbSOleg Nesterov } 20940f1b92cbSOleg Nesterov } 20950f1b92cbSOleg Nesterov 20960f1b92cbSOleg Nesterov if (leader != top) { 20970f1b92cbSOleg Nesterov child = leader; 20980f1b92cbSOleg Nesterov parent = child->real_parent; 20990f1b92cbSOleg Nesterov leader = parent->group_leader; 21000f1b92cbSOleg Nesterov goto up; 21010f1b92cbSOleg Nesterov } 21020f1b92cbSOleg Nesterov out: 21030f1b92cbSOleg Nesterov read_unlock(&tasklist_lock); 21040f1b92cbSOleg Nesterov } 21050f1b92cbSOleg Nesterov 21065fd63b30SRavikiran G Thirumalai #ifndef ARCH_MIN_MMSTRUCT_ALIGN 21075fd63b30SRavikiran G Thirumalai #define ARCH_MIN_MMSTRUCT_ALIGN 0 21085fd63b30SRavikiran G Thirumalai #endif 21095fd63b30SRavikiran G Thirumalai 211051cc5068SAlexey Dobriyan static void sighand_ctor(void *data) 2111aa1757f9SOleg Nesterov { 2112aa1757f9SOleg Nesterov struct sighand_struct *sighand = data; 2113aa1757f9SOleg Nesterov 2114aa1757f9SOleg Nesterov spin_lock_init(&sighand->siglock); 2115b8fceee1SDavide Libenzi init_waitqueue_head(&sighand->signalfd_wqh); 2116fba2afaaSDavide Libenzi } 2117aa1757f9SOleg Nesterov 21181da177e4SLinus Torvalds void __init proc_caches_init(void) 21191da177e4SLinus Torvalds { 21201da177e4SLinus Torvalds sighand_cachep = kmem_cache_create("sighand_cache", 21211da177e4SLinus Torvalds sizeof(struct sighand_struct), 0, 21222dff4405SVegard Nossum SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU| 21235d097056SVladimir Davydov SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor); 21241da177e4SLinus Torvalds signal_cachep = kmem_cache_create("signal_cache", 21251da177e4SLinus Torvalds sizeof(struct signal_struct), 0, 21265d097056SVladimir Davydov SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, 21275d097056SVladimir Davydov NULL); 21281da177e4SLinus Torvalds files_cachep = kmem_cache_create("files_cache", 21291da177e4SLinus Torvalds sizeof(struct files_struct), 0, 21305d097056SVladimir Davydov SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, 21315d097056SVladimir Davydov NULL); 21321da177e4SLinus Torvalds fs_cachep = kmem_cache_create("fs_cache", 21331da177e4SLinus Torvalds sizeof(struct fs_struct), 0, 21345d097056SVladimir Davydov SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, 21355d097056SVladimir Davydov NULL); 21366345d24dSLinus Torvalds /* 21376345d24dSLinus Torvalds * FIXME! The "sizeof(struct mm_struct)" currently includes the 21386345d24dSLinus Torvalds * whole struct cpumask for the OFFSTACK case. We could change 21396345d24dSLinus Torvalds * this to *only* allocate as much of it as required by the 21406345d24dSLinus Torvalds * maximum number of CPUs we can ever have.
The cpumask_allocation 21416345d24dSLinus Torvalds * is at the end of the structure, exactly for that reason. 21426345d24dSLinus Torvalds */ 21431da177e4SLinus Torvalds mm_cachep = kmem_cache_create("mm_struct", 21445fd63b30SRavikiran G Thirumalai sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, 21455d097056SVladimir Davydov SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, 21465d097056SVladimir Davydov NULL); 21475d097056SVladimir Davydov vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); 21488feae131SDavid Howells mmap_init(); 214966577193SAl Viro nsproxy_cache_init(); 21501da177e4SLinus Torvalds } 2151cf2e340fSJANAK DESAI 2152cf2e340fSJANAK DESAI /* 21539bfb23fcSOleg Nesterov * Check constraints on flags passed to the unshare system call. 2154cf2e340fSJANAK DESAI */ 21559bfb23fcSOleg Nesterov static int check_unshare_flags(unsigned long unshare_flags) 2156cf2e340fSJANAK DESAI { 21579bfb23fcSOleg Nesterov if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| 21589bfb23fcSOleg Nesterov CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| 215950804fe3SEric W. Biederman CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| 2160a79a908fSAditya Kali CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP)) 2161cf2e340fSJANAK DESAI return -EINVAL; 21629bfb23fcSOleg Nesterov /* 216312c641abSEric W. Biederman * Not implemented, but pretend it works if there is nothing 216412c641abSEric W. Biederman * to unshare. Note that unsharing the address space or the 216512c641abSEric W. Biederman * signal handlers also needs to unshare the signal queues (aka 216612c641abSEric W. Biederman * CLONE_THREAD). 21679bfb23fcSOleg Nesterov */ 21689bfb23fcSOleg Nesterov if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { 216912c641abSEric W. Biederman if (!thread_group_empty(current)) 217012c641abSEric W. Biederman return -EINVAL; 217112c641abSEric W. Biederman } 217212c641abSEric W. Biederman if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { 217312c641abSEric W. Biederman if (atomic_read(&current->sighand->count) > 1) 217412c641abSEric W. Biederman return -EINVAL; 217512c641abSEric W. Biederman } 217612c641abSEric W. Biederman if (unshare_flags & CLONE_VM) { 217712c641abSEric W.
Biederman if (!current_is_single_threaded()) 21789bfb23fcSOleg Nesterov return -EINVAL; 21799bfb23fcSOleg Nesterov } 2180cf2e340fSJANAK DESAI 2181cf2e340fSJANAK DESAI return 0; 2182cf2e340fSJANAK DESAI } 2183cf2e340fSJANAK DESAI 2184cf2e340fSJANAK DESAI /* 218599d1419dSJANAK DESAI * Unshare the filesystem structure if it is being shared 2186cf2e340fSJANAK DESAI */ 2187cf2e340fSJANAK DESAI static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) 2188cf2e340fSJANAK DESAI { 2189cf2e340fSJANAK DESAI struct fs_struct *fs = current->fs; 2190cf2e340fSJANAK DESAI 2191498052bbSAl Viro if (!(unshare_flags & CLONE_FS) || !fs) 2192498052bbSAl Viro return 0; 2193498052bbSAl Viro 2194498052bbSAl Viro /* don't need lock here; in the worst case we'll do useless copy */ 2195498052bbSAl Viro if (fs->users == 1) 2196498052bbSAl Viro return 0; 2197498052bbSAl Viro 2198498052bbSAl Viro *new_fsp = copy_fs_struct(fs); 219999d1419dSJANAK DESAI if (!*new_fsp) 220099d1419dSJANAK DESAI return -ENOMEM; 2201cf2e340fSJANAK DESAI 2202cf2e340fSJANAK DESAI return 0; 2203cf2e340fSJANAK DESAI } 2204cf2e340fSJANAK DESAI 2205cf2e340fSJANAK DESAI /* 2206a016f338SJANAK DESAI * Unshare file descriptor table if it is being shared 2207cf2e340fSJANAK DESAI */ 2208cf2e340fSJANAK DESAI static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) 2209cf2e340fSJANAK DESAI { 2210cf2e340fSJANAK DESAI struct files_struct *fd = current->files; 2211a016f338SJANAK DESAI int error = 0; 2212cf2e340fSJANAK DESAI 2213cf2e340fSJANAK DESAI if ((unshare_flags & CLONE_FILES) && 2214a016f338SJANAK DESAI (fd && atomic_read(&fd->count) > 1)) { 2215a016f338SJANAK DESAI *new_fdp = dup_fd(fd, &error); 2216a016f338SJANAK DESAI if (!*new_fdp) 2217a016f338SJANAK DESAI return error; 2218a016f338SJANAK DESAI } 2219cf2e340fSJANAK DESAI 2220cf2e340fSJANAK DESAI return 0; 2221cf2e340fSJANAK DESAI } 2222cf2e340fSJANAK DESAI 2223cf2e340fSJANAK DESAI /* 2224cf2e340fSJANAK DESAI * unshare allows a process to 'unshare' part of the process 2225cf2e340fSJANAK DESAI * context which was originally shared using clone. copy_* 2226cf2e340fSJANAK DESAI * functions used by do_fork() cannot be used here directly 2227cf2e340fSJANAK DESAI * because they modify an inactive task_struct that is being 2228cf2e340fSJANAK DESAI * constructed. Here we are modifying the current, active, 2229cf2e340fSJANAK DESAI * task_struct. 2230cf2e340fSJANAK DESAI */ 22316559eed8SHeiko Carstens SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) 2232cf2e340fSJANAK DESAI { 2233cf2e340fSJANAK DESAI struct fs_struct *fs, *new_fs = NULL; 2234cf2e340fSJANAK DESAI struct files_struct *fd, *new_fd = NULL; 2235b2e0d987SEric W. Biederman struct cred *new_cred = NULL; 2236cf7b708cSPavel Emelyanov struct nsproxy *new_nsproxy = NULL; 22379edff4abSManfred Spraul int do_sysvsem = 0; 22389bfb23fcSOleg Nesterov int err; 2239cf2e340fSJANAK DESAI 224050804fe3SEric W. Biederman /* 2241faf00da5SEric W. Biederman * If unsharing a user namespace, we must also unshare the thread group 2242faf00da5SEric W. Biederman * and the filesystem root and working directories. 2243b2e0d987SEric W. Biederman */ 2244b2e0d987SEric W. Biederman if (unshare_flags & CLONE_NEWUSER) 2245e66eded8SEric W. Biederman unshare_flags |= CLONE_THREAD | CLONE_FS; 2246b2e0d987SEric W. Biederman /* 224750804fe3SEric W. Biederman * If unsharing the VM, we must also unshare the signal handlers. 224850804fe3SEric W. Biederman */ 224950804fe3SEric W. Biederman if (unshare_flags & CLONE_VM) 225050804fe3SEric W.
2222cf2e340fSJANAK DESAI 
2223cf2e340fSJANAK DESAI /*
2224cf2e340fSJANAK DESAI  * unshare allows a process to 'unshare' part of the process
2225cf2e340fSJANAK DESAI  * context which was originally shared using clone.  The copy_*
2226cf2e340fSJANAK DESAI  * functions used by do_fork() cannot be used here directly
2227cf2e340fSJANAK DESAI  * because they modify an inactive task_struct that is being
2228cf2e340fSJANAK DESAI  * constructed.  Here we are modifying the current, active
2229cf2e340fSJANAK DESAI  * task_struct.
2230cf2e340fSJANAK DESAI  */
22316559eed8SHeiko Carstens SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
2232cf2e340fSJANAK DESAI {
2233cf2e340fSJANAK DESAI	struct fs_struct *fs, *new_fs = NULL;
2234cf2e340fSJANAK DESAI	struct files_struct *fd, *new_fd = NULL;
2235b2e0d987SEric W. Biederman	struct cred *new_cred = NULL;
2236cf7b708cSPavel Emelyanov	struct nsproxy *new_nsproxy = NULL;
22379edff4abSManfred Spraul	int do_sysvsem = 0;
22389bfb23fcSOleg Nesterov	int err;
2239cf2e340fSJANAK DESAI 
224050804fe3SEric W. Biederman	/*
2241faf00da5SEric W. Biederman	 * If unsharing a user namespace, we must also unshare the thread
2242faf00da5SEric W. Biederman	 * group and the filesystem root and working directories.
2243b2e0d987SEric W. Biederman	 */
2244b2e0d987SEric W. Biederman	if (unshare_flags & CLONE_NEWUSER)
2245e66eded8SEric W. Biederman		unshare_flags |= CLONE_THREAD | CLONE_FS;
2246b2e0d987SEric W. Biederman	/*
224750804fe3SEric W. Biederman	 * If unsharing the VM, we must also unshare signal handlers.
224850804fe3SEric W. Biederman	 */
224950804fe3SEric W. Biederman	if (unshare_flags & CLONE_VM)
225050804fe3SEric W. Biederman		unshare_flags |= CLONE_SIGHAND;
22516013f67fSManfred Spraul	/*
225212c641abSEric W. Biederman	 * If unsharing signal handlers, we must also unshare the signal queues.
225312c641abSEric W. Biederman	 */
225412c641abSEric W. Biederman	if (unshare_flags & CLONE_SIGHAND)
225512c641abSEric W. Biederman		unshare_flags |= CLONE_THREAD;
225612c641abSEric W. Biederman	/*
22579bfb23fcSOleg Nesterov	 * If unsharing a mount namespace, we must also unshare filesystem information.
22589bfb23fcSOleg Nesterov	 */
22599bfb23fcSOleg Nesterov	if (unshare_flags & CLONE_NEWNS)
22609bfb23fcSOleg Nesterov		unshare_flags |= CLONE_FS;
226150804fe3SEric W. Biederman 
226250804fe3SEric W. Biederman	err = check_unshare_flags(unshare_flags);
226350804fe3SEric W. Biederman	if (err)
226450804fe3SEric W. Biederman		goto bad_unshare_out;
22659bfb23fcSOleg Nesterov	/*
22666013f67fSManfred Spraul	 * CLONE_NEWIPC must also detach from the undolist: after switching
22676013f67fSManfred Spraul	 * to a new ipc namespace, the semaphore arrays from the old
22686013f67fSManfred Spraul	 * namespace are unreachable.
22696013f67fSManfred Spraul	 */
22706013f67fSManfred Spraul	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
22719edff4abSManfred Spraul		do_sysvsem = 1;
2272fb0a685cSDaniel Rebelo de Oliveira	err = unshare_fs(unshare_flags, &new_fs);
2273fb0a685cSDaniel Rebelo de Oliveira	if (err)
22749bfb23fcSOleg Nesterov		goto bad_unshare_out;
2275fb0a685cSDaniel Rebelo de Oliveira	err = unshare_fd(unshare_flags, &new_fd);
2276fb0a685cSDaniel Rebelo de Oliveira	if (err)
22779bfb23fcSOleg Nesterov		goto bad_unshare_cleanup_fs;
2278b2e0d987SEric W. Biederman	err = unshare_userns(unshare_flags, &new_cred);
2279fb0a685cSDaniel Rebelo de Oliveira	if (err)
22809edff4abSManfred Spraul		goto bad_unshare_cleanup_fd;
2281b2e0d987SEric W. Biederman	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
2282b2e0d987SEric W. Biederman					 new_cred, new_fs);
2283b2e0d987SEric W. Biederman	if (err)
2284b2e0d987SEric W. Biederman		goto bad_unshare_cleanup_cred;
2285cf2e340fSJANAK DESAI 
2286b2e0d987SEric W. Biederman	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
22879edff4abSManfred Spraul		if (do_sysvsem) {
22889edff4abSManfred Spraul			/*
22899edff4abSManfred Spraul			 * CLONE_SYSVSEM is equivalent to sys_exit() for the semaphore undo lists.
22909edff4abSManfred Spraul			 */
22919edff4abSManfred Spraul			exit_sem(current);
22929edff4abSManfred Spraul		}
2293ab602f79SJack Miller		if (unshare_flags & CLONE_NEWIPC) {
2294ab602f79SJack Miller			/* Orphan segments in old ns (see sem above). */
2295ab602f79SJack Miller			exit_shm(current);
2296ab602f79SJack Miller			shm_init_task(current);
2297ab602f79SJack Miller		}
2298ab516013SSerge E. Hallyn 
22996f977e6bSAlan Cox		if (new_nsproxy)
2300cf7b708cSPavel Emelyanov			switch_task_namespaces(current, new_nsproxy);
2301cf2e340fSJANAK DESAI 
2302cf7b708cSPavel Emelyanov		task_lock(current);
2303cf7b708cSPavel Emelyanov 
2304cf2e340fSJANAK DESAI		if (new_fs) {
2305cf2e340fSJANAK DESAI			fs = current->fs;
23062a4419b5SNick Piggin			spin_lock(&fs->lock);
2307cf2e340fSJANAK DESAI			current->fs = new_fs;
2308498052bbSAl Viro			if (--fs->users)
2309498052bbSAl Viro				new_fs = NULL;
2310498052bbSAl Viro			else
2311cf2e340fSJANAK DESAI				new_fs = fs;
23122a4419b5SNick Piggin			spin_unlock(&fs->lock);
2313cf2e340fSJANAK DESAI		}
2314cf2e340fSJANAK DESAI 
2315cf2e340fSJANAK DESAI		if (new_fd) {
2316cf2e340fSJANAK DESAI			fd = current->files;
2317cf2e340fSJANAK DESAI			current->files = new_fd;
2318cf2e340fSJANAK DESAI			new_fd = fd;
2319cf2e340fSJANAK DESAI		}
2320cf2e340fSJANAK DESAI 
2321cf2e340fSJANAK DESAI		task_unlock(current);
2322b2e0d987SEric W. Biederman 
2323b2e0d987SEric W. Biederman		if (new_cred) {
2324b2e0d987SEric W. Biederman			/* Install the new user namespace */
2325b2e0d987SEric W. Biederman			commit_creds(new_cred);
2326b2e0d987SEric W. Biederman			new_cred = NULL;
2327b2e0d987SEric W. Biederman		}
2328cf2e340fSJANAK DESAI	}
2329cf2e340fSJANAK DESAI 
2330b2e0d987SEric W. Biederman bad_unshare_cleanup_cred:
2331b2e0d987SEric W. Biederman	if (new_cred)
2332b2e0d987SEric W. Biederman		put_cred(new_cred);
2333cf2e340fSJANAK DESAI bad_unshare_cleanup_fd:
2334cf2e340fSJANAK DESAI	if (new_fd)
2335cf2e340fSJANAK DESAI		put_files_struct(new_fd);
2336cf2e340fSJANAK DESAI 
2337cf2e340fSJANAK DESAI bad_unshare_cleanup_fs:
2338cf2e340fSJANAK DESAI	if (new_fs)
2339498052bbSAl Viro		free_fs_struct(new_fs);
2340cf2e340fSJANAK DESAI 
2341cf2e340fSJANAK DESAI bad_unshare_out:
2342cf2e340fSJANAK DESAI	return err;
2343cf2e340fSJANAK DESAI }
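/*
 * Illustrative userspace usage (a sketch, not part of this file): a
 * process with the required privilege can move itself into a private
 * mount namespace with
 *
 *	if (unshare(CLONE_NEWNS) == -1)
 *		perror("unshare");
 *
 * The flag fixups at the top of sys_unshare() then imply CLONE_FS, so
 * the caller also stops sharing its root and working directories.
 */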
23443b125388SAl Viro 
23453b125388SAl Viro /*
23463b125388SAl Viro  * Helper to unshare the files of the current task.
23473b125388SAl Viro  * We don't want to expose copy_files internals to
23483b125388SAl Viro  * the exec layer of the kernel.
23493b125388SAl Viro  */
23503b125388SAl Viro 
23513b125388SAl Viro int unshare_files(struct files_struct **displaced)
23523b125388SAl Viro {
23533b125388SAl Viro	struct task_struct *task = current;
235450704516SAl Viro	struct files_struct *copy = NULL;
23553b125388SAl Viro	int error;
23563b125388SAl Viro 
23573b125388SAl Viro	error = unshare_fd(CLONE_FILES, &copy);
23583b125388SAl Viro	if (error || !copy) {
23593b125388SAl Viro		*displaced = NULL;
23603b125388SAl Viro		return error;
23613b125388SAl Viro	}
23623b125388SAl Viro	*displaced = task->files;
23633b125388SAl Viro	task_lock(task);
23643b125388SAl Viro	task->files = copy;
23653b125388SAl Viro	task_unlock(task);
23663b125388SAl Viro	return 0;
23673b125388SAl Viro }
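/*
 * The intended caller is the exec path: it parks the old table in
 * *displaced across the point of no return and, once the new image is
 * committed, releases it with put_files_struct() (see fs/exec.c).
 */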
236816db3d3fSHeinrich Schuchardt 
236916db3d3fSHeinrich Schuchardt int sysctl_max_threads(struct ctl_table *table, int write,
237016db3d3fSHeinrich Schuchardt		       void __user *buffer, size_t *lenp, loff_t *ppos)
237116db3d3fSHeinrich Schuchardt {
237216db3d3fSHeinrich Schuchardt	struct ctl_table t;
237316db3d3fSHeinrich Schuchardt	int ret;
237416db3d3fSHeinrich Schuchardt	int threads = max_threads;
237516db3d3fSHeinrich Schuchardt	int min = MIN_THREADS;
237616db3d3fSHeinrich Schuchardt	int max = MAX_THREADS;
237716db3d3fSHeinrich Schuchardt 
237816db3d3fSHeinrich Schuchardt	t = *table;
237916db3d3fSHeinrich Schuchardt	t.data = &threads;
238016db3d3fSHeinrich Schuchardt	t.extra1 = &min;
238116db3d3fSHeinrich Schuchardt	t.extra2 = &max;
238216db3d3fSHeinrich Schuchardt 
238316db3d3fSHeinrich Schuchardt	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
238416db3d3fSHeinrich Schuchardt	if (ret || !write)
238516db3d3fSHeinrich Schuchardt		return ret;
238616db3d3fSHeinrich Schuchardt 
238716db3d3fSHeinrich Schuchardt	set_max_threads(threads);
238816db3d3fSHeinrich Schuchardt 
238916db3d3fSHeinrich Schuchardt	return 0;
239016db3d3fSHeinrich Schuchardt }
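/*
 * This is the handler behind /proc/sys/kernel/threads-max.  Values
 * outside [MIN_THREADS, MAX_THREADS] are rejected by
 * proc_dointvec_minmax() via extra1/extra2; accepted writes take
 * effect through set_max_threads(), e.g.
 *
 *	echo 100000 > /proc/sys/kernel/threads-max
 */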