/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
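
/*
 * The sum below walks all *possible* CPUs rather than just the online
 * ones, so per-cpu counts contributed on CPUs that have since gone
 * offline are still included. The result is only approximate: forks and
 * exits on other CPUs can race with the summation.
 */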
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

void __weak arch_release_thread_stack(unsigned long *stack)
{
}

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
#endif
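
/*
 * With CONFIG_VMAP_STACK, each stack lives in its own vmalloc area, so a
 * stack overflow faults on the guard page instead of silently corrupting
 * a neighbouring allocation. The small per-cpu cache above is consulted
 * and refilled with interrupts disabled because free_thread_stack() may
 * run from interrupt context (see the comment in the allocator below).
 */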
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
	void *stack;
	int i;

	local_irq_disable();
	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s = this_cpu_read(cached_stacks[i]);

		if (!s)
			continue;
		this_cpu_write(cached_stacks[i], NULL);

		tsk->stack_vm_area = s;
		local_irq_enable();
		return s->addr;
	}
	local_irq_enable();

	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP | __GFP_HIGHMEM,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));

	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	if (stack)
		tsk->stack_vm_area = find_vm_area(stack);
	return stack;
#else
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
#endif
}
static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	if (task_stack_vm_area(tsk)) {
		unsigned long flags;
		int i;

		local_irq_save(flags);
		for (i = 0; i < NR_CACHED_STACKS; i++) {
			if (this_cpu_read(cached_stacks[i]))
				continue;

			this_cpu_write(cached_stacks[i], tsk->stack_vm_area);
			local_irq_restore(flags);
			return;
		}
		local_irq_restore(flags);

		vfree(tsk->stack);
		return;
	}
#endif

	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	if (vm) {
		int i;

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
			mod_zone_page_state(page_zone(vm->pages[i]),
					    NR_KERNEL_STACK_KB,
					    PAGE_SIZE / 1024 * account);
		}

		/* All stack pages belong to the same memcg. */
		memcg_kmem_update_page_stat(vm->pages[0], MEMCG_KERNEL_STACK_KB,
					    account * (THREAD_SIZE / 1024));
	} else {
		/*
		 * All stack pages are in the same zone and belong to the
		 * same memcg.
		 */
		struct page *first_page = virt_to_page(stack);

		mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
				    THREAD_SIZE / 1024 * account);

		memcg_kmem_update_page_stat(first_page, MEMCG_KERNEL_STACK_KB,
					    account * (THREAD_SIZE / 1024));
	}
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(tsk->state != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);
	arch_release_thread_stack(tsk->stack);
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif
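
/*
 * Illustrative sketch (not part of the original file) of how a reader
 * that inspects another task's stack is expected to pair these helpers
 * on CONFIG_THREAD_INFO_IN_TASK kernels, where the stack can be freed
 * before the task_struct itself:
 *
 *	if (try_get_task_stack(tsk)) {
 *		walk_stack(tsk);	(hypothetical consumer)
 *		put_task_stack(tsk);
 *	}
 *
 * try_get_task_stack() is the refcount-taking counterpart provided in
 * the scheduler headers alongside put_task_stack().
 */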
void free_task(struct task_struct *tsk)
{
#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	put_seccomp_filter(tsk);
	arch_release_task_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	cgroup_free(tsk);
	task_numa_free(tsk);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
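
/*
 * Worked example (illustrative numbers, not from the source): the limit
 * above lets thread stacks consume at most one eighth of memory, i.e.
 * totalram_pages * PAGE_SIZE / (THREAD_SIZE * 8) threads. With 8 GiB of
 * RAM and 16 KiB stacks that is 2^33 / 2^17 = 65536, which is then
 * clamped to [MIN_THREADS, MAX_THREADS].
 */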
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep = kmem_cache_create("task_struct",
			arch_task_struct_size, ARCH_MIN_TASKALIGN,
			SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++) {
		init_user_ns.ucount_max[i] = max_threads/2;
	}
}
int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}
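
/*
 * The magic word is written at the far end of the stack; cheap checks
 * such as task_stack_end_corrupted() (used, on kernels of this vintage,
 * in the scheduler's debug path) can then detect an overflow that ran
 * past the end of the stack.
 */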
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	unsigned long *stack;
	struct vm_struct *stack_vm_area;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	stack = alloc_thread_stack_node(tsk, node);
	if (!stack)
		goto free_tsk;

	stack_vm_area = task_stack_vm_area(tsk);

	err = arch_dup_task_struct(tsk, orig);

	/*
	 * arch_dup_task_struct() clobbers the stack-related fields.  Make
	 * sure they're properly initialized before using any stack-related
	 * functions again.
	 */
	tsk->stack = stack;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	atomic_set(&tsk->stack_refcount, 1);
#endif

	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;

	account_kernel_stack(tsk, 1);

	kcov_task_init(tsk);

	return tsk;

free_stack:
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

#ifdef CONFIG_MMU
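/*
 * dup_mmap() below is the heart of fork's address-space copy: it takes
 * oldmm->mmap_sem (killable, so a fatally-signalled forker can back
 * out), then the new mm's mmap_sem nested inside it, duplicates every
 * VMA except VM_DONTCOPY ones, and relies on copy_page_range() to share
 * pages copy-on-write where possible instead of copying their contents.
 */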
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;

	uprobe_start_dup_mmap();
	if (down_write_killable(&oldmm->mmap_sem)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &=
			~(VM_LOCKED|VM_LOCKONFAULT|VM_UFFD_MISSING|VM_UFFD_WP);
		tmp->vm_next = tmp->vm_prev = NULL;
		tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				atomic_inc(&mapping->i_mmap_writable);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}
		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	down_write(&oldmm->mmap_sem);
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
	up_write(&oldmm->mmap_sem);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);
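
/*
 * Usage example (assumed from the documented interface, not from this
 * file): booting with coredump_filter=0x23 makes bits 0, 1 and 5
 * (anonymous private, anonymous shared, hugetlb private mappings) the
 * inherited default; a process can still override its own value at run
 * time, e.g. with "echo 0x7 > /proc/self/coredump_filter".
 */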
#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	atomic_long_set(&mm->nr_ptes, 0);
	mm_nr_pmds_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	mm->pinned_vm = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	mmu_notifier_mm_init(mm);
	clear_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

	if (atomic_long_read(&mm->nr_ptes))
		pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
				atomic_long_read(&mm->nr_ptes));
	if (mm_nr_pmds(mm))
		pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
				mm_nr_pmds(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}
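
/*
 * Two reference counts guard an mm: mm_users counts users of the address
 * space itself (threads, get_task_mm() callers) and is dropped with
 * mmput(), which tears the mappings down when it reaches zero; mm_count
 * counts "lazy" references to the structure (kernel threads borrowing
 * active_mm, plus one reference held on behalf of all mm_users) and is
 * dropped with mmdrop(), which frees the structure via __mmdrop() below.
 */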
/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current);
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	check_mm(mm);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
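
/*
 * Note the teardown order in __mmput() below: uprobe, AIO, KSM and
 * khugepaged state is dismantled while the mappings still exist; in
 * particular khugepaged_exit() has to run before exit_mmap() (see the
 * inline comment), and only at the very end is the mm_count reference
 * held on behalf of the users dropped with mmdrop().
 */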
static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_page(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	set_bit(MMF_OOM_SKIP, &mm->flags);
	mmdrop(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
#endif

/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve task is single
 * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
 * mm->exe_file, but does so without using set_mm_exe_file() in order
 * to avoid the need for any locks.
 */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file)
		get_file(new_exe_file);
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file)
		fput(old_exe_file);
}
/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}
EXPORT_SYMBOL(get_mm_exe_file);

/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}
EXPORT_SYMBOL(get_task_exe_file);
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
 * this kernel worker thread has only transiently adopted a user mm with
 * use_mm, e.g. to do its AIO). Otherwise returns a reference to the mm,
 * after bumping up the use count. User must release the mm via mmput()
 * after use. Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
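
/*
 * Typical calling pattern (a sketch, not part of the original file):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		... inspect mm, e.g. read mm->total_vm ...
 *		mmput(mm);
 *	}
 *
 * The task_lock() taken above is what makes reading tsk->mm safe
 * against a concurrent exit or exec.
 */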
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}

static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}
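
/*
 * The clear_child_tid futex wake above is the kernel half of what glibc
 * uses for pthread_join(): a thread created with CLONE_CHILD_CLEARTID
 * (or that called set_tid_address()) has its TID word zeroed and woken
 * on exit, which is the futex the joiner is sleeping on in userspace.
 */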
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;
}
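
/*
 * copy_mm() below maps the clone flags onto the two cases: CLONE_VM
 * (threads, vfork) merely bumps mm_users and shares the parent's mm,
 * while a plain fork goes through dup_mm()/dup_mmap() above to build a
 * copy-on-write duplicate. A kernel thread has no mm at all and returns
 * early.
 */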
11711da177e4SLinus Torvalds */ 11721da177e4SLinus Torvalds oldmm = current->mm; 11731da177e4SLinus Torvalds if (!oldmm) 11741da177e4SLinus Torvalds return 0; 11751da177e4SLinus Torvalds 1176615d6e87SDavidlohr Bueso /* initialize the new vmacache entries */ 1177615d6e87SDavidlohr Bueso vmacache_flush(tsk); 1178615d6e87SDavidlohr Bueso 11791da177e4SLinus Torvalds if (clone_flags & CLONE_VM) { 11801da177e4SLinus Torvalds atomic_inc(&oldmm->mm_users); 11811da177e4SLinus Torvalds mm = oldmm; 11821da177e4SLinus Torvalds goto good_mm; 11831da177e4SLinus Torvalds } 11841da177e4SLinus Torvalds 11851da177e4SLinus Torvalds retval = -ENOMEM; 1186a0a7ec30SJANAK DESAI mm = dup_mm(tsk); 11871da177e4SLinus Torvalds if (!mm) 11881da177e4SLinus Torvalds goto fail_nomem; 11891da177e4SLinus Torvalds 11901da177e4SLinus Torvalds good_mm: 11911da177e4SLinus Torvalds tsk->mm = mm; 11921da177e4SLinus Torvalds tsk->active_mm = mm; 11931da177e4SLinus Torvalds return 0; 11941da177e4SLinus Torvalds 11951da177e4SLinus Torvalds fail_nomem: 11961da177e4SLinus Torvalds return retval; 11971da177e4SLinus Torvalds } 11981da177e4SLinus Torvalds 1199a39bc516SAlexey Dobriyan static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) 12001da177e4SLinus Torvalds { 1201498052bbSAl Viro struct fs_struct *fs = current->fs; 12021da177e4SLinus Torvalds if (clone_flags & CLONE_FS) { 1203498052bbSAl Viro /* tsk->fs is already what we want */ 12042a4419b5SNick Piggin spin_lock(&fs->lock); 1205498052bbSAl Viro if (fs->in_exec) { 12062a4419b5SNick Piggin spin_unlock(&fs->lock); 1207498052bbSAl Viro return -EAGAIN; 1208498052bbSAl Viro } 1209498052bbSAl Viro fs->users++; 12102a4419b5SNick Piggin spin_unlock(&fs->lock); 12111da177e4SLinus Torvalds return 0; 12121da177e4SLinus Torvalds } 1213498052bbSAl Viro tsk->fs = copy_fs_struct(fs); 12141da177e4SLinus Torvalds if (!tsk->fs) 12151da177e4SLinus Torvalds return -ENOMEM; 12161da177e4SLinus Torvalds return 0; 12171da177e4SLinus Torvalds } 12181da177e4SLinus Torvalds 1219a016f338SJANAK DESAI static int copy_files(unsigned long clone_flags, struct task_struct *tsk) 1220a016f338SJANAK DESAI { 1221a016f338SJANAK DESAI struct files_struct *oldf, *newf; 1222a016f338SJANAK DESAI int error = 0; 1223a016f338SJANAK DESAI 1224a016f338SJANAK DESAI /* 1225a016f338SJANAK DESAI * A background process may not have any files ... 
1226a016f338SJANAK DESAI */ 1227a016f338SJANAK DESAI oldf = current->files; 1228a016f338SJANAK DESAI if (!oldf) 1229a016f338SJANAK DESAI goto out; 1230a016f338SJANAK DESAI 1231a016f338SJANAK DESAI if (clone_flags & CLONE_FILES) { 1232a016f338SJANAK DESAI atomic_inc(&oldf->count); 1233a016f338SJANAK DESAI goto out; 1234a016f338SJANAK DESAI } 1235a016f338SJANAK DESAI 1236a016f338SJANAK DESAI newf = dup_fd(oldf, &error); 1237a016f338SJANAK DESAI if (!newf) 1238a016f338SJANAK DESAI goto out; 1239a016f338SJANAK DESAI 1240a016f338SJANAK DESAI tsk->files = newf; 1241a016f338SJANAK DESAI error = 0; 1242a016f338SJANAK DESAI out: 1243a016f338SJANAK DESAI return error; 1244a016f338SJANAK DESAI } 1245a016f338SJANAK DESAI 1246fadad878SJens Axboe static int copy_io(unsigned long clone_flags, struct task_struct *tsk) 1247fd0928dfSJens Axboe { 1248fd0928dfSJens Axboe #ifdef CONFIG_BLOCK 1249fd0928dfSJens Axboe struct io_context *ioc = current->io_context; 12506e736be7STejun Heo struct io_context *new_ioc; 1251fd0928dfSJens Axboe 1252fd0928dfSJens Axboe if (!ioc) 1253fd0928dfSJens Axboe return 0; 1254fadad878SJens Axboe /* 1255fadad878SJens Axboe * Share io context with parent, if CLONE_IO is set 1256fadad878SJens Axboe */ 1257fadad878SJens Axboe if (clone_flags & CLONE_IO) { 12583d48749dSTejun Heo ioc_task_link(ioc); 12593d48749dSTejun Heo tsk->io_context = ioc; 1260fadad878SJens Axboe } else if (ioprio_valid(ioc->ioprio)) { 12616e736be7STejun Heo new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE); 12626e736be7STejun Heo if (unlikely(!new_ioc)) 1263fd0928dfSJens Axboe return -ENOMEM; 1264fd0928dfSJens Axboe 12656e736be7STejun Heo new_ioc->ioprio = ioc->ioprio; 126611a3122fSTejun Heo put_io_context(new_ioc); 1267fd0928dfSJens Axboe } 1268fd0928dfSJens Axboe #endif 1269fd0928dfSJens Axboe return 0; 1270fd0928dfSJens Axboe } 1271fd0928dfSJens Axboe 1272a39bc516SAlexey Dobriyan static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) 12731da177e4SLinus Torvalds { 12741da177e4SLinus Torvalds struct sighand_struct *sig; 12751da177e4SLinus Torvalds 127660348802SZhaolei if (clone_flags & CLONE_SIGHAND) { 12771da177e4SLinus Torvalds atomic_inc(&current->sighand->count); 12781da177e4SLinus Torvalds return 0; 12791da177e4SLinus Torvalds } 12801da177e4SLinus Torvalds sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); 1281e56d0903SIngo Molnar rcu_assign_pointer(tsk->sighand, sig); 12821da177e4SLinus Torvalds if (!sig) 12831da177e4SLinus Torvalds return -ENOMEM; 12849d7fb042SPeter Zijlstra 12851da177e4SLinus Torvalds atomic_set(&sig->count, 1); 12861da177e4SLinus Torvalds memcpy(sig->action, current->sighand->action, sizeof(sig->action)); 12871da177e4SLinus Torvalds return 0; 12881da177e4SLinus Torvalds } 12891da177e4SLinus Torvalds 1290a7e5328aSOleg Nesterov void __cleanup_sighand(struct sighand_struct *sighand) 1291c81addc9SOleg Nesterov { 1292d80e731eSOleg Nesterov if (atomic_dec_and_test(&sighand->count)) { 1293d80e731eSOleg Nesterov signalfd_cleanup(sighand); 1294392809b2SOleg Nesterov /* 1295392809b2SOleg Nesterov * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it 1296392809b2SOleg Nesterov * without an RCU grace period, see __lock_task_sighand(). 1297392809b2SOleg Nesterov */ 1298c81addc9SOleg Nesterov kmem_cache_free(sighand_cachep, sighand); 1299c81addc9SOleg Nesterov } 1300d80e731eSOleg Nesterov } 1301c81addc9SOleg Nesterov 1302f06febc9SFrank Mayhar /* 1303f06febc9SFrank Mayhar * Initialize POSIX timer handling for a thread group.
1304f06febc9SFrank Mayhar */ 1305f06febc9SFrank Mayhar static void posix_cpu_timers_init_group(struct signal_struct *sig) 1306f06febc9SFrank Mayhar { 130778d7d407SJiri Slaby unsigned long cpu_limit; 130878d7d407SJiri Slaby 1309316c1608SJason Low cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); 131078d7d407SJiri Slaby if (cpu_limit != RLIM_INFINITY) { 131178d7d407SJiri Slaby sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit); 1312d5c373ebSJason Low sig->cputimer.running = true; 13136279a751SOleg Nesterov } 13146279a751SOleg Nesterov 1315f06febc9SFrank Mayhar /* The timer lists. */ 1316f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[0]); 1317f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[1]); 1318f06febc9SFrank Mayhar INIT_LIST_HEAD(&sig->cpu_timers[2]); 1319f06febc9SFrank Mayhar } 1320f06febc9SFrank Mayhar 1321a39bc516SAlexey Dobriyan static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) 13221da177e4SLinus Torvalds { 13231da177e4SLinus Torvalds struct signal_struct *sig; 13241da177e4SLinus Torvalds 13254ab6c083SOleg Nesterov if (clone_flags & CLONE_THREAD) 1326490dea45SPeter Zijlstra return 0; 13276279a751SOleg Nesterov 1328a56704efSVeaceslav Falico sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); 13291da177e4SLinus Torvalds tsk->signal = sig; 13301da177e4SLinus Torvalds if (!sig) 13311da177e4SLinus Torvalds return -ENOMEM; 13321da177e4SLinus Torvalds 1333b3ac022cSOleg Nesterov sig->nr_threads = 1; 13341da177e4SLinus Torvalds atomic_set(&sig->live, 1); 1335b3ac022cSOleg Nesterov atomic_set(&sig->sigcnt, 1); 13360c740d0aSOleg Nesterov 13370c740d0aSOleg Nesterov /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ 13380c740d0aSOleg Nesterov sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); 13390c740d0aSOleg Nesterov tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); 13400c740d0aSOleg Nesterov 13411da177e4SLinus Torvalds init_waitqueue_head(&sig->wait_chldexit); 1342db51aeccSOleg Nesterov sig->curr_target = tsk; 13431da177e4SLinus Torvalds init_sigpending(&sig->shared_pending); 13441da177e4SLinus Torvalds INIT_LIST_HEAD(&sig->posix_timers); 1345e78c3496SRik van Riel seqlock_init(&sig->stats_lock); 13469d7fb042SPeter Zijlstra prev_cputime_init(&sig->prev_cputime); 13471da177e4SLinus Torvalds 1348c9cb2e3dSThomas Gleixner hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 13491da177e4SLinus Torvalds sig->real_timer.function = it_real_fn; 13501da177e4SLinus Torvalds 13511da177e4SLinus Torvalds task_lock(current->group_leader); 13521da177e4SLinus Torvalds memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); 13531da177e4SLinus Torvalds task_unlock(current->group_leader); 13541da177e4SLinus Torvalds 13556279a751SOleg Nesterov posix_cpu_timers_init_group(sig); 13566279a751SOleg Nesterov 1357522ed776SMiloslav Trmac tty_audit_fork(sig); 13585091faa4SMike Galbraith sched_autogroup_fork(sig); 1359522ed776SMiloslav Trmac 1360a63d83f4SDavid Rientjes sig->oom_score_adj = current->signal->oom_score_adj; 1361dabb16f6SMandeep Singh Baines sig->oom_score_adj_min = current->signal->oom_score_adj_min; 136228b83c51SKOSAKI Motohiro 1363ebec18a6SLennart Poettering sig->has_child_subreaper = current->signal->has_child_subreaper || 1364ebec18a6SLennart Poettering current->signal->is_child_subreaper; 1365ebec18a6SLennart Poettering 13669b1bf12dSKOSAKI Motohiro mutex_init(&sig->cred_guard_mutex); 13679b1bf12dSKOSAKI Motohiro 13681da177e4SLinus Torvalds return 0; 13691da177e4SLinus Torvalds } 
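/*
 * A rough userspace sketch (illustration only, not part of this file)
 * of the path copy_signal() short-circuits above: pthread-style
 * threads pass CLONE_THREAD, so the whole group shares a single
 * signal_struct.  Assuming glibc's clone(2) wrapper, with
 * hypothetical thread_fn, stack_top and arg:
 *
 *	pid_t ctid;
 *	clone(thread_fn, stack_top,
 *	      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *	      CLONE_THREAD | CLONE_CHILD_CLEARTID,
 *	      arg, NULL, NULL, &ctid);
 *
 * copy_process() below rejects CLONE_THREAD without CLONE_SIGHAND and
 * CLONE_SIGHAND without CLONE_VM; CLONE_CHILD_CLEARTID makes
 * mm_release() above zero ctid on exit and FUTEX_WAKE anyone waiting
 * on it, which is the usual basis for a pthread_join()-style wait.
 */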
13701da177e4SLinus Torvalds 1371dbd95212SKees Cook static void copy_seccomp(struct task_struct *p) 1372dbd95212SKees Cook { 1373dbd95212SKees Cook #ifdef CONFIG_SECCOMP 1374dbd95212SKees Cook /* 1375dbd95212SKees Cook * Must be called with sighand->lock held, which is common to 1376dbd95212SKees Cook * all threads in the group. Holding cred_guard_mutex is not 1377dbd95212SKees Cook * needed because this new task is not yet running and cannot 1378dbd95212SKees Cook * be racing exec. 1379dbd95212SKees Cook */ 138069f6a34bSGuenter Roeck assert_spin_locked(&current->sighand->siglock); 1381dbd95212SKees Cook 1382dbd95212SKees Cook /* Ref-count the new filter user, and assign it. */ 1383dbd95212SKees Cook get_seccomp_filter(current); 1384dbd95212SKees Cook p->seccomp = current->seccomp; 1385dbd95212SKees Cook 1386dbd95212SKees Cook /* 1387dbd95212SKees Cook * Explicitly enable no_new_privs here in case it got set 1388dbd95212SKees Cook * between the task_struct being duplicated and holding the 1389dbd95212SKees Cook * sighand lock. The seccomp state and nnp must be in sync. 1390dbd95212SKees Cook */ 1391dbd95212SKees Cook if (task_no_new_privs(current)) 1392dbd95212SKees Cook task_set_no_new_privs(p); 1393dbd95212SKees Cook 1394dbd95212SKees Cook /* 1395dbd95212SKees Cook * If the parent gained a seccomp mode after copying thread 1396dbd95212SKees Cook * flags and before we held the sighand lock, we have 1397dbd95212SKees Cook * to manually enable the seccomp thread flag here. 1398dbd95212SKees Cook */ 1399dbd95212SKees Cook if (p->seccomp.mode != SECCOMP_MODE_DISABLED) 1400dbd95212SKees Cook set_tsk_thread_flag(p, TIF_SECCOMP); 1401dbd95212SKees Cook #endif 1402dbd95212SKees Cook } 1403dbd95212SKees Cook 140417da2bd9SHeiko Carstens SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) 14051da177e4SLinus Torvalds { 14061da177e4SLinus Torvalds current->clear_child_tid = tidptr; 14071da177e4SLinus Torvalds 1408b488893aSPavel Emelyanov return task_pid_vnr(current); 14091da177e4SLinus Torvalds } 14101da177e4SLinus Torvalds 1411a39bc516SAlexey Dobriyan static void rt_mutex_init_task(struct task_struct *p) 141223f78d4aSIngo Molnar { 14131d615482SThomas Gleixner raw_spin_lock_init(&p->pi_lock); 1414e29e175bSZilvinas Valinskas #ifdef CONFIG_RT_MUTEXES 1415fb00aca4SPeter Zijlstra p->pi_waiters = RB_ROOT; 1416fb00aca4SPeter Zijlstra p->pi_waiters_leftmost = NULL; 141723f78d4aSIngo Molnar p->pi_blocked_on = NULL; 141823f78d4aSIngo Molnar #endif 141923f78d4aSIngo Molnar } 142023f78d4aSIngo Molnar 14211da177e4SLinus Torvalds /* 1422f06febc9SFrank Mayhar * Initialize POSIX timer handling for a single task.
1423f06febc9SFrank Mayhar */ 1424f06febc9SFrank Mayhar static void posix_cpu_timers_init(struct task_struct *tsk) 1425f06febc9SFrank Mayhar { 142664861634SMartin Schwidefsky tsk->cputime_expires.prof_exp = 0; 142764861634SMartin Schwidefsky tsk->cputime_expires.virt_exp = 0; 1428f06febc9SFrank Mayhar tsk->cputime_expires.sched_exp = 0; 1429f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[0]); 1430f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[1]); 1431f06febc9SFrank Mayhar INIT_LIST_HEAD(&tsk->cpu_timers[2]); 1432f06febc9SFrank Mayhar } 1433f06febc9SFrank Mayhar 143481907739SOleg Nesterov static inline void 143581907739SOleg Nesterov init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) 143681907739SOleg Nesterov { 143781907739SOleg Nesterov task->pids[type].pid = pid; 143881907739SOleg Nesterov } 143981907739SOleg Nesterov 1440f06febc9SFrank Mayhar /* 14411da177e4SLinus Torvalds * This creates a new process as a copy of the old one, 14421da177e4SLinus Torvalds * but does not actually start it yet. 14431da177e4SLinus Torvalds * 14441da177e4SLinus Torvalds * It copies the registers, and all the appropriate 14451da177e4SLinus Torvalds * parts of the process environment (as per the clone 14461da177e4SLinus Torvalds * flags). The actual kick-off is left to the caller. 14471da177e4SLinus Torvalds */ 14480766f788SEmese Revfy static __latent_entropy struct task_struct *copy_process( 14490766f788SEmese Revfy unsigned long clone_flags, 14501da177e4SLinus Torvalds unsigned long stack_start, 14511da177e4SLinus Torvalds unsigned long stack_size, 14521da177e4SLinus Torvalds int __user *child_tidptr, 145309a05394SRoland McGrath struct pid *pid, 14543033f14aSJosh Triplett int trace, 1455725fc629SAndi Kleen unsigned long tls, 1456725fc629SAndi Kleen int node) 14571da177e4SLinus Torvalds { 14581da177e4SLinus Torvalds int retval; 1459a24efe62SMariusz Kozlowski struct task_struct *p; 14601da177e4SLinus Torvalds 14611da177e4SLinus Torvalds if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) 14621da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 14631da177e4SLinus Torvalds 1464e66eded8SEric W. Biederman if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) 1465e66eded8SEric W. Biederman return ERR_PTR(-EINVAL); 1466e66eded8SEric W. Biederman 14671da177e4SLinus Torvalds /* 14681da177e4SLinus Torvalds * Thread groups must share signals as well, and detached threads 14691da177e4SLinus Torvalds * can only be started up within the thread group. 14701da177e4SLinus Torvalds */ 14711da177e4SLinus Torvalds if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) 14721da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 14731da177e4SLinus Torvalds 14741da177e4SLinus Torvalds /* 14751da177e4SLinus Torvalds * Shared signal handlers imply shared VM. By way of the above, 14761da177e4SLinus Torvalds * thread groups also imply shared VM. Blocking this case allows 14771da177e4SLinus Torvalds * for various simplifications in other code. 14781da177e4SLinus Torvalds */ 14791da177e4SLinus Torvalds if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) 14801da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 14811da177e4SLinus Torvalds 1482123be07bSSukadev Bhattiprolu /* 1483123be07bSSukadev Bhattiprolu * Siblings of global init remain as zombies on exit since they are 1484123be07bSSukadev Bhattiprolu * not reaped by their parent (swapper). 
To solve this and to avoid 1485123be07bSSukadev Bhattiprolu * multi-rooted process trees, prevent global and container-inits 1486123be07bSSukadev Bhattiprolu * from creating siblings. 1487123be07bSSukadev Bhattiprolu */ 1488123be07bSSukadev Bhattiprolu if ((clone_flags & CLONE_PARENT) && 1489123be07bSSukadev Bhattiprolu current->signal->flags & SIGNAL_UNKILLABLE) 1490123be07bSSukadev Bhattiprolu return ERR_PTR(-EINVAL); 1491123be07bSSukadev Bhattiprolu 14928382fcacSEric W. Biederman /* 149340a0d32dSOleg Nesterov * If the new process will be in a different pid or user namespace, 1494faf00da5SEric W. Biederman * do not allow it to share a thread group with the forking task. 14958382fcacSEric W. Biederman */ 1496faf00da5SEric W. Biederman if (clone_flags & CLONE_THREAD) { 149740a0d32dSOleg Nesterov if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || 149840a0d32dSOleg Nesterov (task_active_pid_ns(current) != 1499c2b1df2eSAndy Lutomirski current->nsproxy->pid_ns_for_children)) 15008382fcacSEric W. Biederman return ERR_PTR(-EINVAL); 150140a0d32dSOleg Nesterov } 15028382fcacSEric W. Biederman 15031da177e4SLinus Torvalds retval = security_task_create(clone_flags); 15041da177e4SLinus Torvalds if (retval) 15051da177e4SLinus Torvalds goto fork_out; 15061da177e4SLinus Torvalds 15071da177e4SLinus Torvalds retval = -ENOMEM; 1508725fc629SAndi Kleen p = dup_task_struct(current, node); 15091da177e4SLinus Torvalds if (!p) 15101da177e4SLinus Torvalds goto fork_out; 15111da177e4SLinus Torvalds 1512f7e8b616SSteven Rostedt ftrace_graph_init_task(p); 1513f7e8b616SSteven Rostedt 1514bea493a0SPeter Zijlstra rt_mutex_init_task(p); 1515bea493a0SPeter Zijlstra 1516d12c1a37SIngo Molnar #ifdef CONFIG_PROVE_LOCKING 1517de30a2b3SIngo Molnar DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); 1518de30a2b3SIngo Molnar DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 1519de30a2b3SIngo Molnar #endif 15201da177e4SLinus Torvalds retval = -EAGAIN; 15213b11a1deSDavid Howells if (atomic_read(&p->real_cred->user->processes) >= 152278d7d407SJiri Slaby task_rlimit(p, RLIMIT_NPROC)) { 1523b57922b6SEric Paris if (p->real_cred->user != INIT_USER && 1524b57922b6SEric Paris !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) 15251da177e4SLinus Torvalds goto bad_fork_free; 15261da177e4SLinus Torvalds } 152772fa5997SVasiliy Kulikov current->flags &= ~PF_NPROC_EXCEEDED; 15281da177e4SLinus Torvalds 1529f1752eecSDavid Howells retval = copy_creds(p, clone_flags); 1530f1752eecSDavid Howells if (retval < 0) 1531f1752eecSDavid Howells goto bad_fork_free; 15321da177e4SLinus Torvalds 15331da177e4SLinus Torvalds /* 15341da177e4SLinus Torvalds * If multiple threads are within copy_process(), then this check 15351da177e4SLinus Torvalds * triggers too late. This doesn't hurt; the check is only there 15361da177e4SLinus Torvalds * to stop root fork bombs. 15371da177e4SLinus Torvalds */ 153804ec93feSLi Zefan retval = -EAGAIN; 15391da177e4SLinus Torvalds if (nr_threads >= max_threads) 15401da177e4SLinus Torvalds goto bad_fork_cleanup_count; 15411da177e4SLinus Torvalds 1542ca74e92bSShailabh Nagar delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 1543514ddb44SDavid Rientjes p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); 1544514ddb44SDavid Rientjes p->flags |= PF_FORKNOEXEC; 15451da177e4SLinus Torvalds INIT_LIST_HEAD(&p->children); 15461da177e4SLinus Torvalds INIT_LIST_HEAD(&p->sibling); 1547f41d911fSPaul E.
McKenney rcu_copy_process(p); 15481da177e4SLinus Torvalds p->vfork_done = NULL; 15491da177e4SLinus Torvalds spin_lock_init(&p->alloc_lock); 15501da177e4SLinus Torvalds 15511da177e4SLinus Torvalds init_sigpending(&p->pending); 15521da177e4SLinus Torvalds 155364861634SMartin Schwidefsky p->utime = p->stime = p->gtime = 0; 1554*40565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 155564861634SMartin Schwidefsky p->utimescaled = p->stimescaled = 0; 1556*40565b5aSStanislaw Gruszka #endif 15579d7fb042SPeter Zijlstra prev_cputime_init(&p->prev_cputime); 15589d7fb042SPeter Zijlstra 15596a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1560b7ce2277SFrederic Weisbecker seqcount_init(&p->vtime_seqcount); 15616a61671bSFrederic Weisbecker p->vtime_snap = 0; 15627098c1eaSFrederic Weisbecker p->vtime_snap_whence = VTIME_INACTIVE; 15636a61671bSFrederic Weisbecker #endif 15646a61671bSFrederic Weisbecker 1565a3a2e76cSKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING) 1566a3a2e76cSKAMEZAWA Hiroyuki memset(&p->rss_stat, 0, sizeof(p->rss_stat)); 1567a3a2e76cSKAMEZAWA Hiroyuki #endif 1568172ba844SBalbir Singh 15696976675dSArjan van de Ven p->default_timer_slack_ns = current->timer_slack_ns; 15706976675dSArjan van de Ven 15715995477aSAndrea Righi task_io_accounting_init(&p->ioac); 15721da177e4SLinus Torvalds acct_clear_integrals(p); 15731da177e4SLinus Torvalds 1574f06febc9SFrank Mayhar posix_cpu_timers_init(p); 15751da177e4SLinus Torvalds 1576ccbf62d8SThomas Gleixner p->start_time = ktime_get_ns(); 157757e0be04SThomas Gleixner p->real_start_time = ktime_get_boot_ns(); 15781da177e4SLinus Torvalds p->io_context = NULL; 15791da177e4SLinus Torvalds p->audit_context = NULL; 1580b4f48b63SPaul Menage cgroup_fork(p); 15811da177e4SLinus Torvalds #ifdef CONFIG_NUMA 1582846a16bfSLee Schermerhorn p->mempolicy = mpol_dup(p->mempolicy); 15831da177e4SLinus Torvalds if (IS_ERR(p->mempolicy)) { 15841da177e4SLinus Torvalds retval = PTR_ERR(p->mempolicy); 15851da177e4SLinus Torvalds p->mempolicy = NULL; 1586e8604cb4SLi Zefan goto bad_fork_cleanup_threadgroup_lock; 15871da177e4SLinus Torvalds } 15881da177e4SLinus Torvalds #endif 1589778d3b0fSMichal Hocko #ifdef CONFIG_CPUSETS 1590778d3b0fSMichal Hocko p->cpuset_mem_spread_rotor = NUMA_NO_NODE; 1591778d3b0fSMichal Hocko p->cpuset_slab_spread_rotor = NUMA_NO_NODE; 1592cc9a6c87SMel Gorman seqcount_init(&p->mems_allowed_seq); 1593778d3b0fSMichal Hocko #endif 1594de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS 1595de30a2b3SIngo Molnar p->irq_events = 0; 1596de30a2b3SIngo Molnar p->hardirqs_enabled = 0; 1597de30a2b3SIngo Molnar p->hardirq_enable_ip = 0; 1598de30a2b3SIngo Molnar p->hardirq_enable_event = 0; 1599de30a2b3SIngo Molnar p->hardirq_disable_ip = _THIS_IP_; 1600de30a2b3SIngo Molnar p->hardirq_disable_event = 0; 1601de30a2b3SIngo Molnar p->softirqs_enabled = 1; 1602de30a2b3SIngo Molnar p->softirq_enable_ip = _THIS_IP_; 1603de30a2b3SIngo Molnar p->softirq_enable_event = 0; 1604de30a2b3SIngo Molnar p->softirq_disable_ip = 0; 1605de30a2b3SIngo Molnar p->softirq_disable_event = 0; 1606de30a2b3SIngo Molnar p->hardirq_context = 0; 1607de30a2b3SIngo Molnar p->softirq_context = 0; 1608de30a2b3SIngo Molnar #endif 16098bcbde54SDavid Hildenbrand 16108bcbde54SDavid Hildenbrand p->pagefault_disabled = 0; 16118bcbde54SDavid Hildenbrand 1612fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP 1613fbb9ce95SIngo Molnar p->lockdep_depth = 0; /* no locks held yet */ 1614fbb9ce95SIngo Molnar p->curr_chain_key = 0; 1615fbb9ce95SIngo Molnar p->lockdep_recursion = 0; 
1616fbb9ce95SIngo Molnar #endif 16171da177e4SLinus Torvalds 1618408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES 1619408894eeSIngo Molnar p->blocked_on = NULL; /* not blocked yet */ 1620408894eeSIngo Molnar #endif 1621cafe5635SKent Overstreet #ifdef CONFIG_BCACHE 1622cafe5635SKent Overstreet p->sequential_io = 0; 1623cafe5635SKent Overstreet p->sequential_io_avg = 0; 1624cafe5635SKent Overstreet #endif 16250f481406SMarkus Metzger 16263c90e6e9SSrivatsa Vaddagiri /* Perform scheduler related setup. Assign this task to a CPU. */ 1627aab03e05SDario Faggioli retval = sched_fork(clone_flags, p); 1628aab03e05SDario Faggioli if (retval) 1629aab03e05SDario Faggioli goto bad_fork_cleanup_policy; 16306ab423e0SPeter Zijlstra 1631cdd6c482SIngo Molnar retval = perf_event_init_task(p); 16326ab423e0SPeter Zijlstra if (retval) 16336ab423e0SPeter Zijlstra goto bad_fork_cleanup_policy; 1634fb0a685cSDaniel Rebelo de Oliveira retval = audit_alloc(p); 1635fb0a685cSDaniel Rebelo de Oliveira if (retval) 16366c72e350SPeter Zijlstra goto bad_fork_cleanup_perf; 16371da177e4SLinus Torvalds /* copy all the process information */ 1638ab602f79SJack Miller shm_init_task(p); 1639fb0a685cSDaniel Rebelo de Oliveira retval = copy_semundo(clone_flags, p); 1640fb0a685cSDaniel Rebelo de Oliveira if (retval) 16411da177e4SLinus Torvalds goto bad_fork_cleanup_audit; 1642fb0a685cSDaniel Rebelo de Oliveira retval = copy_files(clone_flags, p); 1643fb0a685cSDaniel Rebelo de Oliveira if (retval) 16441da177e4SLinus Torvalds goto bad_fork_cleanup_semundo; 1645fb0a685cSDaniel Rebelo de Oliveira retval = copy_fs(clone_flags, p); 1646fb0a685cSDaniel Rebelo de Oliveira if (retval) 16471da177e4SLinus Torvalds goto bad_fork_cleanup_files; 1648fb0a685cSDaniel Rebelo de Oliveira retval = copy_sighand(clone_flags, p); 1649fb0a685cSDaniel Rebelo de Oliveira if (retval) 16501da177e4SLinus Torvalds goto bad_fork_cleanup_fs; 1651fb0a685cSDaniel Rebelo de Oliveira retval = copy_signal(clone_flags, p); 1652fb0a685cSDaniel Rebelo de Oliveira if (retval) 16531da177e4SLinus Torvalds goto bad_fork_cleanup_sighand; 1654fb0a685cSDaniel Rebelo de Oliveira retval = copy_mm(clone_flags, p); 1655fb0a685cSDaniel Rebelo de Oliveira if (retval) 16561da177e4SLinus Torvalds goto bad_fork_cleanup_signal; 1657fb0a685cSDaniel Rebelo de Oliveira retval = copy_namespaces(clone_flags, p); 1658fb0a685cSDaniel Rebelo de Oliveira if (retval) 1659d84f4f99SDavid Howells goto bad_fork_cleanup_mm; 1660fb0a685cSDaniel Rebelo de Oliveira retval = copy_io(clone_flags, p); 1661fb0a685cSDaniel Rebelo de Oliveira if (retval) 1662fd0928dfSJens Axboe goto bad_fork_cleanup_namespaces; 16633033f14aSJosh Triplett retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls); 16641da177e4SLinus Torvalds if (retval) 1665fd0928dfSJens Axboe goto bad_fork_cleanup_io; 16661da177e4SLinus Torvalds 1667425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) { 1668c2b1df2eSAndy Lutomirski pid = alloc_pid(p->nsproxy->pid_ns_for_children); 166935f71bc0SMichal Hocko if (IS_ERR(pid)) { 167035f71bc0SMichal Hocko retval = PTR_ERR(pid); 16710740aa5fSJiri Slaby goto bad_fork_cleanup_thread; 1672425fb2b4SPavel Emelyanov } 167335f71bc0SMichal Hocko } 1674425fb2b4SPavel Emelyanov 16751da177e4SLinus Torvalds p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; 16761da177e4SLinus Torvalds /* 16771da177e4SLinus Torvalds * Clear TID on mm_release()? 16781da177e4SLinus Torvalds */ 16791da177e4SLinus Torvalds p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? 
child_tidptr : NULL; 168073c10101SJens Axboe #ifdef CONFIG_BLOCK 168173c10101SJens Axboe p->plug = NULL; 168273c10101SJens Axboe #endif 168342b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX 16848f17d3a5SIngo Molnar p->robust_list = NULL; 16858f17d3a5SIngo Molnar #ifdef CONFIG_COMPAT 16868f17d3a5SIngo Molnar p->compat_robust_list = NULL; 16878f17d3a5SIngo Molnar #endif 1688c87e2837SIngo Molnar INIT_LIST_HEAD(&p->pi_state_list); 1689c87e2837SIngo Molnar p->pi_state_cache = NULL; 169042b2dd0aSAlexey Dobriyan #endif 16911da177e4SLinus Torvalds /* 1692f9a3879aSGOTO Masanori * sigaltstack should be cleared when sharing the same VM 1693f9a3879aSGOTO Masanori */ 1694f9a3879aSGOTO Masanori if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) 16952a742138SStas Sergeev sas_ss_reset(p); 1696f9a3879aSGOTO Masanori 1697f9a3879aSGOTO Masanori /* 16986580807dSOleg Nesterov * Syscall tracing and stepping should be turned off in the 16996580807dSOleg Nesterov * child regardless of CLONE_PTRACE. 17001da177e4SLinus Torvalds */ 17016580807dSOleg Nesterov user_disable_single_step(p); 17021da177e4SLinus Torvalds clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); 1703ed75e8d5SLaurent Vivier #ifdef TIF_SYSCALL_EMU 1704ed75e8d5SLaurent Vivier clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); 1705ed75e8d5SLaurent Vivier #endif 17069745512cSArjan van de Ven clear_all_latency_tracing(p); 17071da177e4SLinus Torvalds 17081da177e4SLinus Torvalds /* ok, now we should be set up.. */ 170918c830dfSOleg Nesterov p->pid = pid_nr(pid); 171018c830dfSOleg Nesterov if (clone_flags & CLONE_THREAD) { 17115f8aadd8SOleg Nesterov p->exit_signal = -1; 171218c830dfSOleg Nesterov p->group_leader = current->group_leader; 171318c830dfSOleg Nesterov p->tgid = current->tgid; 171418c830dfSOleg Nesterov } else { 171518c830dfSOleg Nesterov if (clone_flags & CLONE_PARENT) 17165f8aadd8SOleg Nesterov p->exit_signal = current->group_leader->exit_signal; 17175f8aadd8SOleg Nesterov else 17185f8aadd8SOleg Nesterov p->exit_signal = (clone_flags & CSIGNAL); 171918c830dfSOleg Nesterov p->group_leader = p; 172018c830dfSOleg Nesterov p->tgid = p->pid; 172118c830dfSOleg Nesterov } 17225f8aadd8SOleg Nesterov 17239d823e8fSWu Fengguang p->nr_dirtied = 0; 17249d823e8fSWu Fengguang p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); 172583712358SWu Fengguang p->dirty_paused_when = 0; 17269d823e8fSWu Fengguang 1727bb8cbbfeSOleg Nesterov p->pdeath_signal = 0; 172847e65328SOleg Nesterov INIT_LIST_HEAD(&p->thread_group); 1729158e1645SAl Viro p->task_works = NULL; 17301da177e4SLinus Torvalds 1731568ac888SBalbir Singh threadgroup_change_begin(current); 173218c830dfSOleg Nesterov /* 17337e47682eSAleksa Sarai * Ensure that the cgroup subsystem policies allow the new process to be 17347e47682eSAleksa Sarai * forked. It should be noted that the new process's css_set can be changed 17357e47682eSAleksa Sarai * between here and cgroup_post_fork() if an organisation operation is in 17367e47682eSAleksa Sarai * progress. 17377e47682eSAleksa Sarai */ 1738b53202e6SOleg Nesterov retval = cgroup_can_fork(p); 17397e47682eSAleksa Sarai if (retval) 17407e47682eSAleksa Sarai goto bad_fork_free_pid; 17417e47682eSAleksa Sarai 17427e47682eSAleksa Sarai /* 174318c830dfSOleg Nesterov * Make it visible to the rest of the system, but don't wake it up yet. 174418c830dfSOleg Nesterov * Need tasklist lock for parent etc handling!
174518c830dfSOleg Nesterov */ 17461da177e4SLinus Torvalds write_lock_irq(&tasklist_lock); 17471da177e4SLinus Torvalds 17481da177e4SLinus Torvalds /* CLONE_PARENT re-uses the old parent */ 17492d5516cbSOleg Nesterov if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { 17501da177e4SLinus Torvalds p->real_parent = current->real_parent; 17512d5516cbSOleg Nesterov p->parent_exec_id = current->parent_exec_id; 17522d5516cbSOleg Nesterov } else { 17531da177e4SLinus Torvalds p->real_parent = current; 17542d5516cbSOleg Nesterov p->parent_exec_id = current->self_exec_id; 17552d5516cbSOleg Nesterov } 17561da177e4SLinus Torvalds 17571da177e4SLinus Torvalds spin_lock(&current->sighand->siglock); 17584a2c7a78SOleg Nesterov 17594a2c7a78SOleg Nesterov /* 1760dbd95212SKees Cook * Copy seccomp details explicitly here, in case they were changed 1761dbd95212SKees Cook * before holding sighand lock. 1762dbd95212SKees Cook */ 1763dbd95212SKees Cook copy_seccomp(p); 1764dbd95212SKees Cook 1765dbd95212SKees Cook /* 17664a2c7a78SOleg Nesterov * Process group and session signals need to be delivered to just the 17674a2c7a78SOleg Nesterov * parent before the fork or both the parent and the child after the 17684a2c7a78SOleg Nesterov * fork. Restart if a signal comes in before we add the new process to 17694a2c7a78SOleg Nesterov * its process group. 17704a2c7a78SOleg Nesterov * A fatal signal pending means that current will exit, so the new 17714a2c7a78SOleg Nesterov * thread can't slip out of an OOM kill (or normal SIGKILL). 17724a2c7a78SOleg Nesterov */ 17734a2c7a78SOleg Nesterov recalc_sigpending(); 17744a2c7a78SOleg Nesterov if (signal_pending(current)) { 17754a2c7a78SOleg Nesterov spin_unlock(&current->sighand->siglock); 17764a2c7a78SOleg Nesterov write_unlock_irq(&tasklist_lock); 17774a2c7a78SOleg Nesterov retval = -ERESTARTNOINTR; 17787e47682eSAleksa Sarai goto bad_fork_cancel_cgroup; 17794a2c7a78SOleg Nesterov } 17804a2c7a78SOleg Nesterov 178173b9ebfeSOleg Nesterov if (likely(p->pid)) { 17824b9d33e6STejun Heo ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); 17831da177e4SLinus Torvalds 178481907739SOleg Nesterov init_task_pid(p, PIDTYPE_PID, pid); 17851da177e4SLinus Torvalds if (thread_group_leader(p)) { 178681907739SOleg Nesterov init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); 178781907739SOleg Nesterov init_task_pid(p, PIDTYPE_SID, task_session(current)); 178881907739SOleg Nesterov 17891c4042c2SEric W. Biederman if (is_child_reaper(pid)) { 179017cf22c3SEric W. Biederman ns_of_pid(pid)->child_reaper = p; 17911c4042c2SEric W. Biederman p->signal->flags |= SIGNAL_UNKILLABLE; 17921c4042c2SEric W. Biederman } 17935cd17569SEric W. Biederman 1794fea9d175SOleg Nesterov p->signal->leader_pid = pid; 17959c9f4dedSAlan Cox p->signal->tty = tty_kref_get(current->signal->tty); 17969cd80bbbSOleg Nesterov list_add_tail(&p->sibling, &p->real_parent->children); 17975e85d4abSEric W.
Biederman list_add_tail_rcu(&p->tasks, &init_task.tasks); 179881907739SOleg Nesterov attach_pid(p, PIDTYPE_PGID); 179981907739SOleg Nesterov attach_pid(p, PIDTYPE_SID); 1800909ea964SChristoph Lameter __this_cpu_inc(process_counts); 180180628ca0SOleg Nesterov } else { 180280628ca0SOleg Nesterov current->signal->nr_threads++; 180380628ca0SOleg Nesterov atomic_inc(&current->signal->live); 180480628ca0SOleg Nesterov atomic_inc(&current->signal->sigcnt); 180580628ca0SOleg Nesterov list_add_tail_rcu(&p->thread_group, 180680628ca0SOleg Nesterov &p->group_leader->thread_group); 18070c740d0aSOleg Nesterov list_add_tail_rcu(&p->thread_node, 18080c740d0aSOleg Nesterov &p->signal->thread_head); 18091da177e4SLinus Torvalds } 181081907739SOleg Nesterov attach_pid(p, PIDTYPE_PID); 18111da177e4SLinus Torvalds nr_threads++; 181273b9ebfeSOleg Nesterov } 181373b9ebfeSOleg Nesterov 18141da177e4SLinus Torvalds total_forks++; 18153f17da69SOleg Nesterov spin_unlock(&current->sighand->siglock); 18164af4206bSOleg Nesterov syscall_tracepoint_update(p); 18171da177e4SLinus Torvalds write_unlock_irq(&tasklist_lock); 18184af4206bSOleg Nesterov 1819c13cf856SAndrew Morton proc_fork_connector(p); 1820b53202e6SOleg Nesterov cgroup_post_fork(p); 1821257058aeSTejun Heo threadgroup_change_end(current); 1822cdd6c482SIngo Molnar perf_event_fork(p); 182343d2b113SKAMEZAWA Hiroyuki 182443d2b113SKAMEZAWA Hiroyuki trace_task_newtask(p, clone_flags); 18253ab67966SOleg Nesterov uprobe_copy_process(p, clone_flags); 182643d2b113SKAMEZAWA Hiroyuki 18271da177e4SLinus Torvalds return p; 18281da177e4SLinus Torvalds 18297e47682eSAleksa Sarai bad_fork_cancel_cgroup: 1830b53202e6SOleg Nesterov cgroup_cancel_fork(p); 1831425fb2b4SPavel Emelyanov bad_fork_free_pid: 1832568ac888SBalbir Singh threadgroup_change_end(current); 1833425fb2b4SPavel Emelyanov if (pid != &init_struct_pid) 1834425fb2b4SPavel Emelyanov free_pid(pid); 18350740aa5fSJiri Slaby bad_fork_cleanup_thread: 18360740aa5fSJiri Slaby exit_thread(p); 1837fd0928dfSJens Axboe bad_fork_cleanup_io: 1838b69f2292SLouis Rilling if (p->io_context) 1839b69f2292SLouis Rilling exit_io_context(p); 1840ab516013SSerge E.
Hallyn bad_fork_cleanup_namespaces: 1841444f378bSLinus Torvalds exit_task_namespaces(p); 18421da177e4SLinus Torvalds bad_fork_cleanup_mm: 1843c9f01245SDavid Rientjes if (p->mm) 18441da177e4SLinus Torvalds mmput(p->mm); 18451da177e4SLinus Torvalds bad_fork_cleanup_signal: 18464ab6c083SOleg Nesterov if (!(clone_flags & CLONE_THREAD)) 18471c5354deSMike Galbraith free_signal_struct(p->signal); 18481da177e4SLinus Torvalds bad_fork_cleanup_sighand: 1849a7e5328aSOleg Nesterov __cleanup_sighand(p->sighand); 18501da177e4SLinus Torvalds bad_fork_cleanup_fs: 18511da177e4SLinus Torvalds exit_fs(p); /* blocking */ 18521da177e4SLinus Torvalds bad_fork_cleanup_files: 18531da177e4SLinus Torvalds exit_files(p); /* blocking */ 18541da177e4SLinus Torvalds bad_fork_cleanup_semundo: 18551da177e4SLinus Torvalds exit_sem(p); 18561da177e4SLinus Torvalds bad_fork_cleanup_audit: 18571da177e4SLinus Torvalds audit_free(p); 18586c72e350SPeter Zijlstra bad_fork_cleanup_perf: 1859cdd6c482SIngo Molnar perf_event_free_task(p); 18606c72e350SPeter Zijlstra bad_fork_cleanup_policy: 18611da177e4SLinus Torvalds #ifdef CONFIG_NUMA 1862f0be3d32SLee Schermerhorn mpol_put(p->mempolicy); 1863e8604cb4SLi Zefan bad_fork_cleanup_threadgroup_lock: 18641da177e4SLinus Torvalds #endif 186535df17c5SShailabh Nagar delayacct_tsk_free(p); 18661da177e4SLinus Torvalds bad_fork_cleanup_count: 1867d84f4f99SDavid Howells atomic_dec(&p->cred->user->processes); 1868e0e81739SDavid Howells exit_creds(p); 18691da177e4SLinus Torvalds bad_fork_free: 1870405c0759SAndy Lutomirski p->state = TASK_DEAD; 187168f24b08SAndy Lutomirski put_task_stack(p); 18721da177e4SLinus Torvalds free_task(p); 1873fe7d37d1SOleg Nesterov fork_out: 1874fe7d37d1SOleg Nesterov return ERR_PTR(retval); 18751da177e4SLinus Torvalds } 18761da177e4SLinus Torvalds 1877f106eee1SOleg Nesterov static inline void init_idle_pids(struct pid_link *links) 1878f106eee1SOleg Nesterov { 1879f106eee1SOleg Nesterov enum pid_type type; 1880f106eee1SOleg Nesterov 1881f106eee1SOleg Nesterov for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { 1882f106eee1SOleg Nesterov INIT_HLIST_NODE(&links[type].node); /* not really needed */ 1883f106eee1SOleg Nesterov links[type].pid = &init_struct_pid; 1884f106eee1SOleg Nesterov } 1885f106eee1SOleg Nesterov } 1886f106eee1SOleg Nesterov 18870db0628dSPaul Gortmaker struct task_struct *fork_idle(int cpu) 18881da177e4SLinus Torvalds { 188936c8b586SIngo Molnar struct task_struct *task; 1890725fc629SAndi Kleen task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0, 1891725fc629SAndi Kleen cpu_to_node(cpu)); 1892f106eee1SOleg Nesterov if (!IS_ERR(task)) { 1893f106eee1SOleg Nesterov init_idle_pids(task->pids); 18941da177e4SLinus Torvalds init_idle(task, cpu); 1895f106eee1SOleg Nesterov } 189673b9ebfeSOleg Nesterov 18971da177e4SLinus Torvalds return task; 18981da177e4SLinus Torvalds } 18991da177e4SLinus Torvalds 19001da177e4SLinus Torvalds /* 19011da177e4SLinus Torvalds * Ok, this is the main fork-routine. 19021da177e4SLinus Torvalds * 19031da177e4SLinus Torvalds * It copies the process, and if successful kick-starts 19041da177e4SLinus Torvalds * it and waits for it to finish using the VM if required. 
19051da177e4SLinus Torvalds */ 19063033f14aSJosh Triplett long _do_fork(unsigned long clone_flags, 19071da177e4SLinus Torvalds unsigned long stack_start, 19081da177e4SLinus Torvalds unsigned long stack_size, 19091da177e4SLinus Torvalds int __user *parent_tidptr, 19103033f14aSJosh Triplett int __user *child_tidptr, 19113033f14aSJosh Triplett unsigned long tls) 19121da177e4SLinus Torvalds { 19131da177e4SLinus Torvalds struct task_struct *p; 19141da177e4SLinus Torvalds int trace = 0; 191592476d7fSEric W. Biederman long nr; 19161da177e4SLinus Torvalds 1917bdff746aSAndrew Morton /* 19184b9d33e6STejun Heo * Determine whether and which event to report to ptracer. When 19194b9d33e6STejun Heo * called from kernel_thread or CLONE_UNTRACED is explicitly 19204b9d33e6STejun Heo * requested, no event is reported; otherwise, report if the event 19214b9d33e6STejun Heo * for the type of forking is enabled. 192209a05394SRoland McGrath */ 1923e80d6661SAl Viro if (!(clone_flags & CLONE_UNTRACED)) { 19244b9d33e6STejun Heo if (clone_flags & CLONE_VFORK) 19254b9d33e6STejun Heo trace = PTRACE_EVENT_VFORK; 19264b9d33e6STejun Heo else if ((clone_flags & CSIGNAL) != SIGCHLD) 19274b9d33e6STejun Heo trace = PTRACE_EVENT_CLONE; 19284b9d33e6STejun Heo else 19294b9d33e6STejun Heo trace = PTRACE_EVENT_FORK; 19304b9d33e6STejun Heo 19314b9d33e6STejun Heo if (likely(!ptrace_event_enabled(current, trace))) 19324b9d33e6STejun Heo trace = 0; 19334b9d33e6STejun Heo } 19341da177e4SLinus Torvalds 193562e791c1SAl Viro p = copy_process(clone_flags, stack_start, stack_size, 1936725fc629SAndi Kleen child_tidptr, NULL, trace, tls, NUMA_NO_NODE); 193738addce8SEmese Revfy add_latent_entropy(); 19381da177e4SLinus Torvalds /* 19391da177e4SLinus Torvalds * Do this prior to waking up the new thread - the thread pointer 19401da177e4SLinus Torvalds * might become invalid after that point, if the thread exits quickly. 19411da177e4SLinus Torvalds */ 19421da177e4SLinus Torvalds if (!IS_ERR(p)) { 19431da177e4SLinus Torvalds struct completion vfork; 19444e52365fSMatthew Dempsky struct pid *pid; 19451da177e4SLinus Torvalds 19460a16b607SMathieu Desnoyers trace_sched_process_fork(current, p); 19470a16b607SMathieu Desnoyers 19484e52365fSMatthew Dempsky pid = get_task_pid(p, PIDTYPE_PID); 19494e52365fSMatthew Dempsky nr = pid_vnr(pid); 195030e49c26SPavel Emelyanov 195130e49c26SPavel Emelyanov if (clone_flags & CLONE_PARENT_SETTID) 195230e49c26SPavel Emelyanov put_user(nr, parent_tidptr); 1953a6f5e063SSukadev Bhattiprolu 19541da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) { 19551da177e4SLinus Torvalds p->vfork_done = &vfork; 19561da177e4SLinus Torvalds init_completion(&vfork); 1957d68b46feSOleg Nesterov get_task_struct(p); 19581da177e4SLinus Torvalds } 19591da177e4SLinus Torvalds 19603e51e3edSSamir Bellabes wake_up_new_task(p); 19611da177e4SLinus Torvalds 19624b9d33e6STejun Heo /* forking complete and child started to run, tell ptracer */ 19634b9d33e6STejun Heo if (unlikely(trace)) 19644e52365fSMatthew Dempsky ptrace_event_pid(trace, pid); 196509a05394SRoland McGrath 19661da177e4SLinus Torvalds if (clone_flags & CLONE_VFORK) { 1967d68b46feSOleg Nesterov if (!wait_for_vfork_done(p, &vfork)) 19684e52365fSMatthew Dempsky ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); 19699f59ce5dSChuck Ebbert } 19704e52365fSMatthew Dempsky 19714e52365fSMatthew Dempsky put_pid(pid); 19721da177e4SLinus Torvalds } else { 197392476d7fSEric W. Biederman nr = PTR_ERR(p); 19741da177e4SLinus Torvalds } 197592476d7fSEric W.
Biederman return nr; 19761da177e4SLinus Torvalds } 19771da177e4SLinus Torvalds 19783033f14aSJosh Triplett #ifndef CONFIG_HAVE_COPY_THREAD_TLS 19793033f14aSJosh Triplett /* For compatibility with architectures that call do_fork directly rather than 19803033f14aSJosh Triplett * using the syscall entry points below. */ 19813033f14aSJosh Triplett long do_fork(unsigned long clone_flags, 19823033f14aSJosh Triplett unsigned long stack_start, 19833033f14aSJosh Triplett unsigned long stack_size, 19843033f14aSJosh Triplett int __user *parent_tidptr, 19853033f14aSJosh Triplett int __user *child_tidptr) 19863033f14aSJosh Triplett { 19873033f14aSJosh Triplett return _do_fork(clone_flags, stack_start, stack_size, 19883033f14aSJosh Triplett parent_tidptr, child_tidptr, 0); 19893033f14aSJosh Triplett } 19903033f14aSJosh Triplett #endif 19913033f14aSJosh Triplett 19922aa3a7f8SAl Viro /* 19932aa3a7f8SAl Viro * Create a kernel thread. 19942aa3a7f8SAl Viro */ 19952aa3a7f8SAl Viro pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 19962aa3a7f8SAl Viro { 19973033f14aSJosh Triplett return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, 19983033f14aSJosh Triplett (unsigned long)arg, NULL, NULL, 0); 19992aa3a7f8SAl Viro } 20002aa3a7f8SAl Viro 2001d2125043SAl Viro #ifdef __ARCH_WANT_SYS_FORK 2002d2125043SAl Viro SYSCALL_DEFINE0(fork) 2003d2125043SAl Viro { 2004d2125043SAl Viro #ifdef CONFIG_MMU 20053033f14aSJosh Triplett return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0); 2006d2125043SAl Viro #else 2007d2125043SAl Viro /* cannot be supported in nommu mode */ 20085d59e182SDaeseok Youn return -EINVAL; 2009d2125043SAl Viro #endif 2010d2125043SAl Viro } 2011d2125043SAl Viro #endif 2012d2125043SAl Viro 2013d2125043SAl Viro #ifdef __ARCH_WANT_SYS_VFORK 2014d2125043SAl Viro SYSCALL_DEFINE0(vfork) 2015d2125043SAl Viro { 20163033f14aSJosh Triplett return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 20173033f14aSJosh Triplett 0, NULL, NULL, 0); 2018d2125043SAl Viro } 2019d2125043SAl Viro #endif 2020d2125043SAl Viro 2021d2125043SAl Viro #ifdef __ARCH_WANT_SYS_CLONE 2022d2125043SAl Viro #ifdef CONFIG_CLONE_BACKWARDS 2023d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2024d2125043SAl Viro int __user *, parent_tidptr, 20253033f14aSJosh Triplett unsigned long, tls, 2026d2125043SAl Viro int __user *, child_tidptr) 2027d2125043SAl Viro #elif defined(CONFIG_CLONE_BACKWARDS2) 2028d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, 2029d2125043SAl Viro int __user *, parent_tidptr, 2030d2125043SAl Viro int __user *, child_tidptr, 20313033f14aSJosh Triplett unsigned long, tls) 2032dfa9771aSMichal Simek #elif defined(CONFIG_CLONE_BACKWARDS3) 2033dfa9771aSMichal Simek SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, 2034dfa9771aSMichal Simek int, stack_size, 2035dfa9771aSMichal Simek int __user *, parent_tidptr, 2036dfa9771aSMichal Simek int __user *, child_tidptr, 20373033f14aSJosh Triplett unsigned long, tls) 2038d2125043SAl Viro #else 2039d2125043SAl Viro SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2040d2125043SAl Viro int __user *, parent_tidptr, 2041d2125043SAl Viro int __user *, child_tidptr, 20423033f14aSJosh Triplett unsigned long, tls) 2043d2125043SAl Viro #endif 2044d2125043SAl Viro { 20453033f14aSJosh Triplett return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls); 2046d2125043SAl Viro } 2047d2125043SAl Viro #endif 2048d2125043SAl Viro
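/*
 * For orientation (illustrative summary, not kernel code): every entry
 * point above is a thin wrapper that picks a flag preset and funnels
 * into _do_fork():
 *
 *	fork()  -> _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0)
 *	vfork() -> _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 0,
 *	                    NULL, NULL, 0)
 *	clone() -> _do_fork(clone_flags, newsp, 0, parent_tidptr,
 *	                    child_tidptr, tls)
 *	kernel_thread() -> _do_fork(flags | CLONE_VM | CLONE_UNTRACED,
 *	                            fn, arg, NULL, NULL, 0)
 *
 * Only the flag presets differ; all paths share the copy_process()
 * machinery above.
 */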
20495fd63b30SRavikiran G Thirumalai #ifndef ARCH_MIN_MMSTRUCT_ALIGN 20505fd63b30SRavikiran G Thirumalai #define ARCH_MIN_MMSTRUCT_ALIGN 0 20515fd63b30SRavikiran G Thirumalai #endif 20525fd63b30SRavikiran G Thirumalai 205351cc5068SAlexey Dobriyan static void sighand_ctor(void *data) 2054aa1757f9SOleg Nesterov { 2055aa1757f9SOleg Nesterov struct sighand_struct *sighand = data; 2056aa1757f9SOleg Nesterov 2057aa1757f9SOleg Nesterov spin_lock_init(&sighand->siglock); 2058b8fceee1SDavide Libenzi init_waitqueue_head(&sighand->signalfd_wqh); 2059fba2afaaSDavide Libenzi } 2060aa1757f9SOleg Nesterov 20611da177e4SLinus Torvalds void __init proc_caches_init(void) 20621da177e4SLinus Torvalds { 20631da177e4SLinus Torvalds sighand_cachep = kmem_cache_create("sighand_cache", 20641da177e4SLinus Torvalds sizeof(struct sighand_struct), 0, 20652dff4405SVegard Nossum SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU| 20665d097056SVladimir Davydov SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor); 20671da177e4SLinus Torvalds signal_cachep = kmem_cache_create("signal_cache", 20681da177e4SLinus Torvalds sizeof(struct signal_struct), 0, 20695d097056SVladimir Davydov SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, 20705d097056SVladimir Davydov NULL); 20711da177e4SLinus Torvalds files_cachep = kmem_cache_create("files_cache", 20721da177e4SLinus Torvalds sizeof(struct files_struct), 0, 20735d097056SVladimir Davydov SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, 20745d097056SVladimir Davydov NULL); 20751da177e4SLinus Torvalds fs_cachep = kmem_cache_create("fs_cache", 20761da177e4SLinus Torvalds sizeof(struct fs_struct), 0, 20775d097056SVladimir Davydov SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, 20785d097056SVladimir Davydov NULL); 20796345d24dSLinus Torvalds /* 20806345d24dSLinus Torvalds * FIXME! The "sizeof(struct mm_struct)" currently includes the 20816345d24dSLinus Torvalds * whole struct cpumask for the OFFSTACK case. We could change 20826345d24dSLinus Torvalds * this to *only* allocate as much of it as required by the 20836345d24dSLinus Torvalds * maximum number of CPUs we can ever have. The cpumask_allocation 20846345d24dSLinus Torvalds * is at the end of the structure, exactly for that reason. 20856345d24dSLinus Torvalds */ 20861da177e4SLinus Torvalds mm_cachep = kmem_cache_create("mm_struct", 20875fd63b30SRavikiran G Thirumalai sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, 20885d097056SVladimir Davydov SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, 20895d097056SVladimir Davydov NULL); 20905d097056SVladimir Davydov vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); 20918feae131SDavid Howells mmap_init(); 209266577193SAl Viro nsproxy_cache_init(); 20931da177e4SLinus Torvalds } 2094cf2e340fSJANAK DESAI 2095cf2e340fSJANAK DESAI /* 20969bfb23fcSOleg Nesterov * Check constraints on flags passed to the unshare system call. 2097cf2e340fSJANAK DESAI */ 20989bfb23fcSOleg Nesterov static int check_unshare_flags(unsigned long unshare_flags) 2099cf2e340fSJANAK DESAI { 21009bfb23fcSOleg Nesterov if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| 21019bfb23fcSOleg Nesterov CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| 210250804fe3SEric W. Biederman CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| 2103a79a908fSAditya Kali CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP)) 2104cf2e340fSJANAK DESAI return -EINVAL; 21059bfb23fcSOleg Nesterov /* 210612c641abSEric W. Biederman * Not implemented, but pretend it works if there is nothing 210712c641abSEric W.
Biederman * to unshare. Note that unsharing the address space or the 210812c641abSEric W. Biederman * signal handlers also needs to unshare the signal queues (aka 210912c641abSEric W. Biederman * CLONE_THREAD). 21109bfb23fcSOleg Nesterov */ 21119bfb23fcSOleg Nesterov if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { 211212c641abSEric W. Biederman if (!thread_group_empty(current)) 211312c641abSEric W. Biederman return -EINVAL; 211412c641abSEric W. Biederman } 211512c641abSEric W. Biederman if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { 211612c641abSEric W. Biederman if (atomic_read(&current->sighand->count) > 1) 211712c641abSEric W. Biederman return -EINVAL; 211812c641abSEric W. Biederman } 211912c641abSEric W. Biederman if (unshare_flags & CLONE_VM) { 212012c641abSEric W. Biederman if (!current_is_single_threaded()) 21219bfb23fcSOleg Nesterov return -EINVAL; 21229bfb23fcSOleg Nesterov } 2123cf2e340fSJANAK DESAI 2124cf2e340fSJANAK DESAI return 0; 2125cf2e340fSJANAK DESAI } 2126cf2e340fSJANAK DESAI 2127cf2e340fSJANAK DESAI /* 212899d1419dSJANAK DESAI * Unshare the filesystem structure if it is being shared 2129cf2e340fSJANAK DESAI */ 2130cf2e340fSJANAK DESAI static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) 2131cf2e340fSJANAK DESAI { 2132cf2e340fSJANAK DESAI struct fs_struct *fs = current->fs; 2133cf2e340fSJANAK DESAI 2134498052bbSAl Viro if (!(unshare_flags & CLONE_FS) || !fs) 2135498052bbSAl Viro return 0; 2136498052bbSAl Viro 2137498052bbSAl Viro /* don't need lock here; in the worst case we'll do useless copy */ 2138498052bbSAl Viro if (fs->users == 1) 2139498052bbSAl Viro return 0; 2140498052bbSAl Viro 2141498052bbSAl Viro *new_fsp = copy_fs_struct(fs); 214299d1419dSJANAK DESAI if (!*new_fsp) 214399d1419dSJANAK DESAI return -ENOMEM; 2144cf2e340fSJANAK DESAI 2145cf2e340fSJANAK DESAI return 0; 2146cf2e340fSJANAK DESAI } 2147cf2e340fSJANAK DESAI 2148cf2e340fSJANAK DESAI /* 2149a016f338SJANAK DESAI * Unshare file descriptor table if it is being shared 2150cf2e340fSJANAK DESAI */ 2151cf2e340fSJANAK DESAI static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) 2152cf2e340fSJANAK DESAI { 2153cf2e340fSJANAK DESAI struct files_struct *fd = current->files; 2154a016f338SJANAK DESAI int error = 0; 2155cf2e340fSJANAK DESAI 2156cf2e340fSJANAK DESAI if ((unshare_flags & CLONE_FILES) && 2157a016f338SJANAK DESAI (fd && atomic_read(&fd->count) > 1)) { 2158a016f338SJANAK DESAI *new_fdp = dup_fd(fd, &error); 2159a016f338SJANAK DESAI if (!*new_fdp) 2160a016f338SJANAK DESAI return error; 2161a016f338SJANAK DESAI } 2162cf2e340fSJANAK DESAI 2163cf2e340fSJANAK DESAI return 0; 2164cf2e340fSJANAK DESAI } 2165cf2e340fSJANAK DESAI 2166cf2e340fSJANAK DESAI /* 2167cf2e340fSJANAK DESAI * unshare allows a process to 'unshare' part of the process 2168cf2e340fSJANAK DESAI * context which was originally shared using clone. copy_* 2169cf2e340fSJANAK DESAI * functions used by do_fork() cannot be used here directly 2170cf2e340fSJANAK DESAI * because they modify an inactive task_struct that is being 2171cf2e340fSJANAK DESAI * constructed. Here we are modifying the current, active 2172cf2e340fSJANAK DESAI * task_struct. 2173cf2e340fSJANAK DESAI */ 21746559eed8SHeiko Carstens SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) 2175cf2e340fSJANAK DESAI { 2176cf2e340fSJANAK DESAI struct fs_struct *fs, *new_fs = NULL; 2177cf2e340fSJANAK DESAI struct files_struct *fd, *new_fd = NULL; 2178b2e0d987SEric W.
Biederman struct cred *new_cred = NULL; 2179cf7b708cSPavel Emelyanov struct nsproxy *new_nsproxy = NULL; 21809edff4abSManfred Spraul int do_sysvsem = 0; 21819bfb23fcSOleg Nesterov int err; 2182cf2e340fSJANAK DESAI 218350804fe3SEric W. Biederman /* 2184faf00da5SEric W. Biederman * If unsharing a user namespace, must also unshare the thread group 2185faf00da5SEric W. Biederman * and unshare the filesystem root and working directories. 2186b2e0d987SEric W. Biederman */ 2187b2e0d987SEric W. Biederman if (unshare_flags & CLONE_NEWUSER) 2188e66eded8SEric W. Biederman unshare_flags |= CLONE_THREAD | CLONE_FS; 2189b2e0d987SEric W. Biederman /* 219050804fe3SEric W. Biederman * If unsharing vm, must also unshare signal handlers. 219150804fe3SEric W. Biederman */ 219250804fe3SEric W. Biederman if (unshare_flags & CLONE_VM) 219350804fe3SEric W. Biederman unshare_flags |= CLONE_SIGHAND; 21946013f67fSManfred Spraul /* 219512c641abSEric W. Biederman * If unsharing signal handlers, must also unshare the signal queues. 219612c641abSEric W. Biederman */ 219712c641abSEric W. Biederman if (unshare_flags & CLONE_SIGHAND) 219812c641abSEric W. Biederman unshare_flags |= CLONE_THREAD; 219912c641abSEric W. Biederman /* 22009bfb23fcSOleg Nesterov * If unsharing namespace, must also unshare filesystem information. 22019bfb23fcSOleg Nesterov */ 22029bfb23fcSOleg Nesterov if (unshare_flags & CLONE_NEWNS) 22039bfb23fcSOleg Nesterov unshare_flags |= CLONE_FS; 220450804fe3SEric W. Biederman 220550804fe3SEric W. Biederman err = check_unshare_flags(unshare_flags); 220650804fe3SEric W. Biederman if (err) 220750804fe3SEric W. Biederman goto bad_unshare_out; 22089bfb23fcSOleg Nesterov /* 22096013f67fSManfred Spraul * CLONE_NEWIPC must also detach from the undolist: after switching 22106013f67fSManfred Spraul * to a new ipc namespace, the semaphore arrays from the old 22116013f67fSManfred Spraul * namespace are unreachable. 22126013f67fSManfred Spraul */ 22136013f67fSManfred Spraul if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) 22149edff4abSManfred Spraul do_sysvsem = 1; 2215fb0a685cSDaniel Rebelo de Oliveira err = unshare_fs(unshare_flags, &new_fs); 2216fb0a685cSDaniel Rebelo de Oliveira if (err) 22179bfb23fcSOleg Nesterov goto bad_unshare_out; 2218fb0a685cSDaniel Rebelo de Oliveira err = unshare_fd(unshare_flags, &new_fd); 2219fb0a685cSDaniel Rebelo de Oliveira if (err) 22209bfb23fcSOleg Nesterov goto bad_unshare_cleanup_fs; 2221b2e0d987SEric W. Biederman err = unshare_userns(unshare_flags, &new_cred); 2222fb0a685cSDaniel Rebelo de Oliveira if (err) 22239edff4abSManfred Spraul goto bad_unshare_cleanup_fd; 2224b2e0d987SEric W. Biederman err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, 2225b2e0d987SEric W. Biederman new_cred, new_fs); 2226b2e0d987SEric W. Biederman if (err) 2227b2e0d987SEric W. Biederman goto bad_unshare_cleanup_cred; 2228cf2e340fSJANAK DESAI 2229b2e0d987SEric W. Biederman if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { 22309edff4abSManfred Spraul if (do_sysvsem) { 22319edff4abSManfred Spraul /* 22329edff4abSManfred Spraul * CLONE_SYSVSEM is equivalent to sys_exit(). 22339edff4abSManfred Spraul */ 22349edff4abSManfred Spraul exit_sem(current); 22359edff4abSManfred Spraul } 2236ab602f79SJack Miller if (unshare_flags & CLONE_NEWIPC) { 2237ab602f79SJack Miller /* Orphan segments in old ns (see sem above). */ 2238ab602f79SJack Miller exit_shm(current); 2239ab602f79SJack Miller shm_init_task(current); 2240ab602f79SJack Miller } 2241ab516013SSerge E.
Hallyn 22426f977e6bSAlan Cox if (new_nsproxy) 2243cf7b708cSPavel Emelyanov switch_task_namespaces(current, new_nsproxy); 2244cf2e340fSJANAK DESAI 2245cf7b708cSPavel Emelyanov task_lock(current); 2246cf7b708cSPavel Emelyanov 2247cf2e340fSJANAK DESAI if (new_fs) { 2248cf2e340fSJANAK DESAI fs = current->fs; 22492a4419b5SNick Piggin spin_lock(&fs->lock); 2250cf2e340fSJANAK DESAI current->fs = new_fs; 2251498052bbSAl Viro if (--fs->users) 2252498052bbSAl Viro new_fs = NULL; 2253498052bbSAl Viro else 2254cf2e340fSJANAK DESAI new_fs = fs; 22552a4419b5SNick Piggin spin_unlock(&fs->lock); 2256cf2e340fSJANAK DESAI } 2257cf2e340fSJANAK DESAI 2258cf2e340fSJANAK DESAI if (new_fd) { 2259cf2e340fSJANAK DESAI fd = current->files; 2260cf2e340fSJANAK DESAI current->files = new_fd; 2261cf2e340fSJANAK DESAI new_fd = fd; 2262cf2e340fSJANAK DESAI } 2263cf2e340fSJANAK DESAI 2264cf2e340fSJANAK DESAI task_unlock(current); 2265b2e0d987SEric W. Biederman 2266b2e0d987SEric W. Biederman if (new_cred) { 2267b2e0d987SEric W. Biederman /* Install the new user namespace */ 2268b2e0d987SEric W. Biederman commit_creds(new_cred); 2269b2e0d987SEric W. Biederman new_cred = NULL; 2270b2e0d987SEric W. Biederman } 2271cf2e340fSJANAK DESAI } 2272cf2e340fSJANAK DESAI 2273b2e0d987SEric W. Biederman bad_unshare_cleanup_cred: 2274b2e0d987SEric W. Biederman if (new_cred) 2275b2e0d987SEric W. Biederman put_cred(new_cred); 2276cf2e340fSJANAK DESAI bad_unshare_cleanup_fd: 2277cf2e340fSJANAK DESAI if (new_fd) 2278cf2e340fSJANAK DESAI put_files_struct(new_fd); 2279cf2e340fSJANAK DESAI 2280cf2e340fSJANAK DESAI bad_unshare_cleanup_fs: 2281cf2e340fSJANAK DESAI if (new_fs) 2282498052bbSAl Viro free_fs_struct(new_fs); 2283cf2e340fSJANAK DESAI 2284cf2e340fSJANAK DESAI bad_unshare_out: 2285cf2e340fSJANAK DESAI return err; 2286cf2e340fSJANAK DESAI } 22873b125388SAl Viro 22883b125388SAl Viro /* 22893b125388SAl Viro * Helper to unshare the files of the current task. 22903b125388SAl Viro * We don't want to expose copy_files internals to 22913b125388SAl Viro * the exec layer of the kernel. 
22923b125388SAl Viro */ 22933b125388SAl Viro 22943b125388SAl Viro int unshare_files(struct files_struct **displaced) 22953b125388SAl Viro { 22963b125388SAl Viro struct task_struct *task = current; 229750704516SAl Viro struct files_struct *copy = NULL; 22983b125388SAl Viro int error; 22993b125388SAl Viro 23003b125388SAl Viro error = unshare_fd(CLONE_FILES, &copy); 23013b125388SAl Viro if (error || !copy) { 23023b125388SAl Viro *displaced = NULL; 23033b125388SAl Viro return error; 23043b125388SAl Viro } 23053b125388SAl Viro *displaced = task->files; 23063b125388SAl Viro task_lock(task); 23073b125388SAl Viro task->files = copy; 23083b125388SAl Viro task_unlock(task); 23093b125388SAl Viro return 0; 23103b125388SAl Viro } 231116db3d3fSHeinrich Schuchardt 231216db3d3fSHeinrich Schuchardt int sysctl_max_threads(struct ctl_table *table, int write, 231316db3d3fSHeinrich Schuchardt void __user *buffer, size_t *lenp, loff_t *ppos) 231416db3d3fSHeinrich Schuchardt { 231516db3d3fSHeinrich Schuchardt struct ctl_table t; 231616db3d3fSHeinrich Schuchardt int ret; 231716db3d3fSHeinrich Schuchardt int threads = max_threads; 231816db3d3fSHeinrich Schuchardt int min = MIN_THREADS; 231916db3d3fSHeinrich Schuchardt int max = MAX_THREADS; 232016db3d3fSHeinrich Schuchardt 232116db3d3fSHeinrich Schuchardt t = *table; 232216db3d3fSHeinrich Schuchardt t.data = &threads; 232316db3d3fSHeinrich Schuchardt t.extra1 = &min; 232416db3d3fSHeinrich Schuchardt t.extra2 = &max; 232516db3d3fSHeinrich Schuchardt 232616db3d3fSHeinrich Schuchardt ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 232716db3d3fSHeinrich Schuchardt if (ret || !write) 232816db3d3fSHeinrich Schuchardt return ret; 232916db3d3fSHeinrich Schuchardt 233016db3d3fSHeinrich Schuchardt set_max_threads(threads); 233116db3d3fSHeinrich Schuchardt 233216db3d3fSHeinrich Schuchardt return 0; 233316db3d3fSHeinrich Schuchardt } 2334
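/*
 * Illustrative usage (a sketch, assuming this handler is wired to the
 * kernel.threads-max sysctl in kernel/sysctl.c, as is conventional):
 * the thread limit can then be adjusted at runtime from a root shell,
 *
 *	# echo 100000 > /proc/sys/kernel/threads-max
 *
 * proc_dointvec_minmax() rejects values outside the
 * [MIN_THREADS, MAX_THREADS] clamp set up above; accepted values are
 * applied through set_max_threads().
 */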