1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/fork.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 */ 7 8 /* 9 * 'fork.c' contains the help-routines for the 'fork' system call 10 * (see also entry.S and others). 11 * Fork is rather simple, once you get the hang of it, but the memory 12 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()' 13 */ 14 15 #include <linux/anon_inodes.h> 16 #include <linux/slab.h> 17 #include <linux/sched/autogroup.h> 18 #include <linux/sched/mm.h> 19 #include <linux/sched/coredump.h> 20 #include <linux/sched/user.h> 21 #include <linux/sched/numa_balancing.h> 22 #include <linux/sched/stat.h> 23 #include <linux/sched/task.h> 24 #include <linux/sched/task_stack.h> 25 #include <linux/sched/cputime.h> 26 #include <linux/seq_file.h> 27 #include <linux/rtmutex.h> 28 #include <linux/init.h> 29 #include <linux/unistd.h> 30 #include <linux/module.h> 31 #include <linux/vmalloc.h> 32 #include <linux/completion.h> 33 #include <linux/personality.h> 34 #include <linux/mempolicy.h> 35 #include <linux/sem.h> 36 #include <linux/file.h> 37 #include <linux/fdtable.h> 38 #include <linux/iocontext.h> 39 #include <linux/key.h> 40 #include <linux/binfmts.h> 41 #include <linux/mman.h> 42 #include <linux/mmu_notifier.h> 43 #include <linux/hmm.h> 44 #include <linux/fs.h> 45 #include <linux/mm.h> 46 #include <linux/vmacache.h> 47 #include <linux/nsproxy.h> 48 #include <linux/capability.h> 49 #include <linux/cpu.h> 50 #include <linux/cgroup.h> 51 #include <linux/security.h> 52 #include <linux/hugetlb.h> 53 #include <linux/seccomp.h> 54 #include <linux/swap.h> 55 #include <linux/syscalls.h> 56 #include <linux/jiffies.h> 57 #include <linux/futex.h> 58 #include <linux/compat.h> 59 #include <linux/kthread.h> 60 #include <linux/task_io_accounting_ops.h> 61 #include <linux/rcupdate.h> 62 #include <linux/ptrace.h> 63 #include <linux/mount.h> 64 #include <linux/audit.h> 65 #include <linux/memcontrol.h> 66 #include <linux/ftrace.h> 67 #include <linux/proc_fs.h> 68 #include <linux/profile.h> 69 #include <linux/rmap.h> 70 #include <linux/ksm.h> 71 #include <linux/acct.h> 72 #include <linux/userfaultfd_k.h> 73 #include <linux/tsacct_kern.h> 74 #include <linux/cn_proc.h> 75 #include <linux/freezer.h> 76 #include <linux/delayacct.h> 77 #include <linux/taskstats_kern.h> 78 #include <linux/random.h> 79 #include <linux/tty.h> 80 #include <linux/blkdev.h> 81 #include <linux/fs_struct.h> 82 #include <linux/magic.h> 83 #include <linux/perf_event.h> 84 #include <linux/posix-timers.h> 85 #include <linux/user-return-notifier.h> 86 #include <linux/oom.h> 87 #include <linux/khugepaged.h> 88 #include <linux/signalfd.h> 89 #include <linux/uprobes.h> 90 #include <linux/aio.h> 91 #include <linux/compiler.h> 92 #include <linux/sysctl.h> 93 #include <linux/kcov.h> 94 #include <linux/livepatch.h> 95 #include <linux/thread_info.h> 96 #include <linux/stackleak.h> 97 98 #include <asm/pgtable.h> 99 #include <asm/pgalloc.h> 100 #include <linux/uaccess.h> 101 #include <asm/mmu_context.h> 102 #include <asm/cacheflush.h> 103 #include <asm/tlbflush.h> 104 105 #include <trace/events/sched.h> 106 107 #define CREATE_TRACE_POINTS 108 #include <trace/events/task.h> 109 110 /* 111 * Minimum number of threads to boot the kernel 112 */ 113 #define MIN_THREADS 20 114 115 /* 116 * Maximum number of threads 117 */ 118 #define MAX_THREADS FUTEX_TID_MASK 119 120 /* 121 * Protected counters by write_lock_irq(&tasklist_lock) 122 */ 123 unsigned long total_forks; /* Handle normal Linux uptimes. */ 124 int nr_threads; /* The idle threads do not count.. */ 125 126 static int max_threads; /* tunable limit on nr_threads */ 127 128 DEFINE_PER_CPU(unsigned long, process_counts) = 0; 129 130 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ 131 132 #ifdef CONFIG_PROVE_RCU 133 int lockdep_tasklist_lock_is_held(void) 134 { 135 return lockdep_is_held(&tasklist_lock); 136 } 137 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held); 138 #endif /* #ifdef CONFIG_PROVE_RCU */ 139 140 int nr_processes(void) 141 { 142 int cpu; 143 int total = 0; 144 145 for_each_possible_cpu(cpu) 146 total += per_cpu(process_counts, cpu); 147 148 return total; 149 } 150 151 void __weak arch_release_task_struct(struct task_struct *tsk) 152 { 153 } 154 155 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 156 static struct kmem_cache *task_struct_cachep; 157 158 static inline struct task_struct *alloc_task_struct_node(int node) 159 { 160 return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); 161 } 162 163 static inline void free_task_struct(struct task_struct *tsk) 164 { 165 kmem_cache_free(task_struct_cachep, tsk); 166 } 167 #endif 168 169 #ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR 170 171 /* 172 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a 173 * kmemcache based allocator. 174 */ 175 # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) 176 177 #ifdef CONFIG_VMAP_STACK 178 /* 179 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB 180 * flush. Try to minimize the number of calls by caching stacks. 181 */ 182 #define NR_CACHED_STACKS 2 183 static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]); 184 185 static int free_vm_stack_cache(unsigned int cpu) 186 { 187 struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu); 188 int i; 189 190 for (i = 0; i < NR_CACHED_STACKS; i++) { 191 struct vm_struct *vm_stack = cached_vm_stacks[i]; 192 193 if (!vm_stack) 194 continue; 195 196 vfree(vm_stack->addr); 197 cached_vm_stacks[i] = NULL; 198 } 199 200 return 0; 201 } 202 #endif 203 204 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) 205 { 206 #ifdef CONFIG_VMAP_STACK 207 void *stack; 208 int i; 209 210 for (i = 0; i < NR_CACHED_STACKS; i++) { 211 struct vm_struct *s; 212 213 s = this_cpu_xchg(cached_stacks[i], NULL); 214 215 if (!s) 216 continue; 217 218 /* Clear stale pointers from reused stack. */ 219 memset(s->addr, 0, THREAD_SIZE); 220 221 tsk->stack_vm_area = s; 222 tsk->stack = s->addr; 223 return s->addr; 224 } 225 226 /* 227 * Allocated stacks are cached and later reused by new threads, 228 * so memcg accounting is performed manually on assigning/releasing 229 * stacks to tasks. Drop __GFP_ACCOUNT. 230 */ 231 stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, 232 VMALLOC_START, VMALLOC_END, 233 THREADINFO_GFP & ~__GFP_ACCOUNT, 234 PAGE_KERNEL, 235 0, node, __builtin_return_address(0)); 236 237 /* 238 * We can't call find_vm_area() in interrupt context, and 239 * free_thread_stack() can be called in interrupt context, 240 * so cache the vm_struct. 241 */ 242 if (stack) { 243 tsk->stack_vm_area = find_vm_area(stack); 244 tsk->stack = stack; 245 } 246 return stack; 247 #else 248 struct page *page = alloc_pages_node(node, THREADINFO_GFP, 249 THREAD_SIZE_ORDER); 250 251 if (likely(page)) { 252 tsk->stack = page_address(page); 253 return tsk->stack; 254 } 255 return NULL; 256 #endif 257 } 258 259 static inline void free_thread_stack(struct task_struct *tsk) 260 { 261 #ifdef CONFIG_VMAP_STACK 262 struct vm_struct *vm = task_stack_vm_area(tsk); 263 264 if (vm) { 265 int i; 266 267 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { 268 mod_memcg_page_state(vm->pages[i], 269 MEMCG_KERNEL_STACK_KB, 270 -(int)(PAGE_SIZE / 1024)); 271 272 memcg_kmem_uncharge(vm->pages[i], 0); 273 } 274 275 for (i = 0; i < NR_CACHED_STACKS; i++) { 276 if (this_cpu_cmpxchg(cached_stacks[i], 277 NULL, tsk->stack_vm_area) != NULL) 278 continue; 279 280 return; 281 } 282 283 vfree_atomic(tsk->stack); 284 return; 285 } 286 #endif 287 288 __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER); 289 } 290 # else 291 static struct kmem_cache *thread_stack_cache; 292 293 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, 294 int node) 295 { 296 unsigned long *stack; 297 stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); 298 tsk->stack = stack; 299 return stack; 300 } 301 302 static void free_thread_stack(struct task_struct *tsk) 303 { 304 kmem_cache_free(thread_stack_cache, tsk->stack); 305 } 306 307 void thread_stack_cache_init(void) 308 { 309 thread_stack_cache = kmem_cache_create_usercopy("thread_stack", 310 THREAD_SIZE, THREAD_SIZE, 0, 0, 311 THREAD_SIZE, NULL); 312 BUG_ON(thread_stack_cache == NULL); 313 } 314 # endif 315 #endif 316 317 /* SLAB cache for signal_struct structures (tsk->signal) */ 318 static struct kmem_cache *signal_cachep; 319 320 /* SLAB cache for sighand_struct structures (tsk->sighand) */ 321 struct kmem_cache *sighand_cachep; 322 323 /* SLAB cache for files_struct structures (tsk->files) */ 324 struct kmem_cache *files_cachep; 325 326 /* SLAB cache for fs_struct structures (tsk->fs) */ 327 struct kmem_cache *fs_cachep; 328 329 /* SLAB cache for vm_area_struct structures */ 330 static struct kmem_cache *vm_area_cachep; 331 332 /* SLAB cache for mm_struct structures (tsk->mm) */ 333 static struct kmem_cache *mm_cachep; 334 335 struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) 336 { 337 struct vm_area_struct *vma; 338 339 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 340 if (vma) 341 vma_init(vma, mm); 342 return vma; 343 } 344 345 struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) 346 { 347 struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 348 349 if (new) { 350 *new = *orig; 351 INIT_LIST_HEAD(&new->anon_vma_chain); 352 } 353 return new; 354 } 355 356 void vm_area_free(struct vm_area_struct *vma) 357 { 358 kmem_cache_free(vm_area_cachep, vma); 359 } 360 361 static void account_kernel_stack(struct task_struct *tsk, int account) 362 { 363 void *stack = task_stack_page(tsk); 364 struct vm_struct *vm = task_stack_vm_area(tsk); 365 366 BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0); 367 368 if (vm) { 369 int i; 370 371 BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE); 372 373 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { 374 mod_zone_page_state(page_zone(vm->pages[i]), 375 NR_KERNEL_STACK_KB, 376 PAGE_SIZE / 1024 * account); 377 } 378 } else { 379 /* 380 * All stack pages are in the same zone and belong to the 381 * same memcg. 382 */ 383 struct page *first_page = virt_to_page(stack); 384 385 mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB, 386 THREAD_SIZE / 1024 * account); 387 388 mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB, 389 account * (THREAD_SIZE / 1024)); 390 } 391 } 392 393 static int memcg_charge_kernel_stack(struct task_struct *tsk) 394 { 395 #ifdef CONFIG_VMAP_STACK 396 struct vm_struct *vm = task_stack_vm_area(tsk); 397 int ret; 398 399 if (vm) { 400 int i; 401 402 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { 403 /* 404 * If memcg_kmem_charge() fails, page->mem_cgroup 405 * pointer is NULL, and both memcg_kmem_uncharge() 406 * and mod_memcg_page_state() in free_thread_stack() 407 * will ignore this page. So it's safe. 408 */ 409 ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0); 410 if (ret) 411 return ret; 412 413 mod_memcg_page_state(vm->pages[i], 414 MEMCG_KERNEL_STACK_KB, 415 PAGE_SIZE / 1024); 416 } 417 } 418 #endif 419 return 0; 420 } 421 422 static void release_task_stack(struct task_struct *tsk) 423 { 424 if (WARN_ON(tsk->state != TASK_DEAD)) 425 return; /* Better to leak the stack than to free prematurely */ 426 427 account_kernel_stack(tsk, -1); 428 free_thread_stack(tsk); 429 tsk->stack = NULL; 430 #ifdef CONFIG_VMAP_STACK 431 tsk->stack_vm_area = NULL; 432 #endif 433 } 434 435 #ifdef CONFIG_THREAD_INFO_IN_TASK 436 void put_task_stack(struct task_struct *tsk) 437 { 438 if (refcount_dec_and_test(&tsk->stack_refcount)) 439 release_task_stack(tsk); 440 } 441 #endif 442 443 void free_task(struct task_struct *tsk) 444 { 445 #ifndef CONFIG_THREAD_INFO_IN_TASK 446 /* 447 * The task is finally done with both the stack and thread_info, 448 * so free both. 449 */ 450 release_task_stack(tsk); 451 #else 452 /* 453 * If the task had a separate stack allocation, it should be gone 454 * by now. 455 */ 456 WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0); 457 #endif 458 rt_mutex_debug_task_free(tsk); 459 ftrace_graph_exit_task(tsk); 460 put_seccomp_filter(tsk); 461 arch_release_task_struct(tsk); 462 if (tsk->flags & PF_KTHREAD) 463 free_kthread_struct(tsk); 464 free_task_struct(tsk); 465 } 466 EXPORT_SYMBOL(free_task); 467 468 #ifdef CONFIG_MMU 469 static __latent_entropy int dup_mmap(struct mm_struct *mm, 470 struct mm_struct *oldmm) 471 { 472 struct vm_area_struct *mpnt, *tmp, *prev, **pprev; 473 struct rb_node **rb_link, *rb_parent; 474 int retval; 475 unsigned long charge; 476 LIST_HEAD(uf); 477 478 uprobe_start_dup_mmap(); 479 if (down_write_killable(&oldmm->mmap_sem)) { 480 retval = -EINTR; 481 goto fail_uprobe_end; 482 } 483 flush_cache_dup_mm(oldmm); 484 uprobe_dup_mmap(oldmm, mm); 485 /* 486 * Not linked in yet - no deadlock potential: 487 */ 488 down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); 489 490 /* No ordering required: file already has been exposed. */ 491 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); 492 493 mm->total_vm = oldmm->total_vm; 494 mm->data_vm = oldmm->data_vm; 495 mm->exec_vm = oldmm->exec_vm; 496 mm->stack_vm = oldmm->stack_vm; 497 498 rb_link = &mm->mm_rb.rb_node; 499 rb_parent = NULL; 500 pprev = &mm->mmap; 501 retval = ksm_fork(mm, oldmm); 502 if (retval) 503 goto out; 504 retval = khugepaged_fork(mm, oldmm); 505 if (retval) 506 goto out; 507 508 prev = NULL; 509 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { 510 struct file *file; 511 512 if (mpnt->vm_flags & VM_DONTCOPY) { 513 vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); 514 continue; 515 } 516 charge = 0; 517 /* 518 * Don't duplicate many vmas if we've been oom-killed (for 519 * example) 520 */ 521 if (fatal_signal_pending(current)) { 522 retval = -EINTR; 523 goto out; 524 } 525 if (mpnt->vm_flags & VM_ACCOUNT) { 526 unsigned long len = vma_pages(mpnt); 527 528 if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ 529 goto fail_nomem; 530 charge = len; 531 } 532 tmp = vm_area_dup(mpnt); 533 if (!tmp) 534 goto fail_nomem; 535 retval = vma_dup_policy(mpnt, tmp); 536 if (retval) 537 goto fail_nomem_policy; 538 tmp->vm_mm = mm; 539 retval = dup_userfaultfd(tmp, &uf); 540 if (retval) 541 goto fail_nomem_anon_vma_fork; 542 if (tmp->vm_flags & VM_WIPEONFORK) { 543 /* VM_WIPEONFORK gets a clean slate in the child. */ 544 tmp->anon_vma = NULL; 545 if (anon_vma_prepare(tmp)) 546 goto fail_nomem_anon_vma_fork; 547 } else if (anon_vma_fork(tmp, mpnt)) 548 goto fail_nomem_anon_vma_fork; 549 tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); 550 tmp->vm_next = tmp->vm_prev = NULL; 551 file = tmp->vm_file; 552 if (file) { 553 struct inode *inode = file_inode(file); 554 struct address_space *mapping = file->f_mapping; 555 556 get_file(file); 557 if (tmp->vm_flags & VM_DENYWRITE) 558 atomic_dec(&inode->i_writecount); 559 i_mmap_lock_write(mapping); 560 if (tmp->vm_flags & VM_SHARED) 561 atomic_inc(&mapping->i_mmap_writable); 562 flush_dcache_mmap_lock(mapping); 563 /* insert tmp into the share list, just after mpnt */ 564 vma_interval_tree_insert_after(tmp, mpnt, 565 &mapping->i_mmap); 566 flush_dcache_mmap_unlock(mapping); 567 i_mmap_unlock_write(mapping); 568 } 569 570 /* 571 * Clear hugetlb-related page reserves for children. This only 572 * affects MAP_PRIVATE mappings. Faults generated by the child 573 * are not guaranteed to succeed, even if read-only 574 */ 575 if (is_vm_hugetlb_page(tmp)) 576 reset_vma_resv_huge_pages(tmp); 577 578 /* 579 * Link in the new vma and copy the page table entries. 580 */ 581 *pprev = tmp; 582 pprev = &tmp->vm_next; 583 tmp->vm_prev = prev; 584 prev = tmp; 585 586 __vma_link_rb(mm, tmp, rb_link, rb_parent); 587 rb_link = &tmp->vm_rb.rb_right; 588 rb_parent = &tmp->vm_rb; 589 590 mm->map_count++; 591 if (!(tmp->vm_flags & VM_WIPEONFORK)) 592 retval = copy_page_range(mm, oldmm, mpnt); 593 594 if (tmp->vm_ops && tmp->vm_ops->open) 595 tmp->vm_ops->open(tmp); 596 597 if (retval) 598 goto out; 599 } 600 /* a new mm has just been created */ 601 retval = arch_dup_mmap(oldmm, mm); 602 out: 603 up_write(&mm->mmap_sem); 604 flush_tlb_mm(oldmm); 605 up_write(&oldmm->mmap_sem); 606 dup_userfaultfd_complete(&uf); 607 fail_uprobe_end: 608 uprobe_end_dup_mmap(); 609 return retval; 610 fail_nomem_anon_vma_fork: 611 mpol_put(vma_policy(tmp)); 612 fail_nomem_policy: 613 vm_area_free(tmp); 614 fail_nomem: 615 retval = -ENOMEM; 616 vm_unacct_memory(charge); 617 goto out; 618 } 619 620 static inline int mm_alloc_pgd(struct mm_struct *mm) 621 { 622 mm->pgd = pgd_alloc(mm); 623 if (unlikely(!mm->pgd)) 624 return -ENOMEM; 625 return 0; 626 } 627 628 static inline void mm_free_pgd(struct mm_struct *mm) 629 { 630 pgd_free(mm, mm->pgd); 631 } 632 #else 633 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) 634 { 635 down_write(&oldmm->mmap_sem); 636 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); 637 up_write(&oldmm->mmap_sem); 638 return 0; 639 } 640 #define mm_alloc_pgd(mm) (0) 641 #define mm_free_pgd(mm) 642 #endif /* CONFIG_MMU */ 643 644 static void check_mm(struct mm_struct *mm) 645 { 646 int i; 647 648 for (i = 0; i < NR_MM_COUNTERS; i++) { 649 long x = atomic_long_read(&mm->rss_stat.count[i]); 650 651 if (unlikely(x)) 652 printk(KERN_ALERT "BUG: Bad rss-counter state " 653 "mm:%p idx:%d val:%ld\n", mm, i, x); 654 } 655 656 if (mm_pgtables_bytes(mm)) 657 pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n", 658 mm_pgtables_bytes(mm)); 659 660 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 661 VM_BUG_ON_MM(mm->pmd_huge_pte, mm); 662 #endif 663 } 664 665 #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) 666 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) 667 668 /* 669 * Called when the last reference to the mm 670 * is dropped: either by a lazy thread or by 671 * mmput. Free the page directory and the mm. 672 */ 673 void __mmdrop(struct mm_struct *mm) 674 { 675 BUG_ON(mm == &init_mm); 676 WARN_ON_ONCE(mm == current->mm); 677 WARN_ON_ONCE(mm == current->active_mm); 678 mm_free_pgd(mm); 679 destroy_context(mm); 680 mmu_notifier_mm_destroy(mm); 681 check_mm(mm); 682 put_user_ns(mm->user_ns); 683 free_mm(mm); 684 } 685 EXPORT_SYMBOL_GPL(__mmdrop); 686 687 static void mmdrop_async_fn(struct work_struct *work) 688 { 689 struct mm_struct *mm; 690 691 mm = container_of(work, struct mm_struct, async_put_work); 692 __mmdrop(mm); 693 } 694 695 static void mmdrop_async(struct mm_struct *mm) 696 { 697 if (unlikely(atomic_dec_and_test(&mm->mm_count))) { 698 INIT_WORK(&mm->async_put_work, mmdrop_async_fn); 699 schedule_work(&mm->async_put_work); 700 } 701 } 702 703 static inline void free_signal_struct(struct signal_struct *sig) 704 { 705 taskstats_tgid_free(sig); 706 sched_autogroup_exit(sig); 707 /* 708 * __mmdrop is not safe to call from softirq context on x86 due to 709 * pgd_dtor so postpone it to the async context 710 */ 711 if (sig->oom_mm) 712 mmdrop_async(sig->oom_mm); 713 kmem_cache_free(signal_cachep, sig); 714 } 715 716 static inline void put_signal_struct(struct signal_struct *sig) 717 { 718 if (refcount_dec_and_test(&sig->sigcnt)) 719 free_signal_struct(sig); 720 } 721 722 void __put_task_struct(struct task_struct *tsk) 723 { 724 WARN_ON(!tsk->exit_state); 725 WARN_ON(refcount_read(&tsk->usage)); 726 WARN_ON(tsk == current); 727 728 cgroup_free(tsk); 729 task_numa_free(tsk, true); 730 security_task_free(tsk); 731 exit_creds(tsk); 732 delayacct_tsk_free(tsk); 733 put_signal_struct(tsk->signal); 734 735 if (!profile_handoff_task(tsk)) 736 free_task(tsk); 737 } 738 EXPORT_SYMBOL_GPL(__put_task_struct); 739 740 void __init __weak arch_task_cache_init(void) { } 741 742 /* 743 * set_max_threads 744 */ 745 static void set_max_threads(unsigned int max_threads_suggested) 746 { 747 u64 threads; 748 unsigned long nr_pages = totalram_pages(); 749 750 /* 751 * The number of threads shall be limited such that the thread 752 * structures may only consume a small part of the available memory. 753 */ 754 if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64) 755 threads = MAX_THREADS; 756 else 757 threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE, 758 (u64) THREAD_SIZE * 8UL); 759 760 if (threads > max_threads_suggested) 761 threads = max_threads_suggested; 762 763 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); 764 } 765 766 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT 767 /* Initialized by the architecture: */ 768 int arch_task_struct_size __read_mostly; 769 #endif 770 771 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 772 static void task_struct_whitelist(unsigned long *offset, unsigned long *size) 773 { 774 /* Fetch thread_struct whitelist for the architecture. */ 775 arch_thread_struct_whitelist(offset, size); 776 777 /* 778 * Handle zero-sized whitelist or empty thread_struct, otherwise 779 * adjust offset to position of thread_struct in task_struct. 780 */ 781 if (unlikely(*size == 0)) 782 *offset = 0; 783 else 784 *offset += offsetof(struct task_struct, thread); 785 } 786 #endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */ 787 788 void __init fork_init(void) 789 { 790 int i; 791 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 792 #ifndef ARCH_MIN_TASKALIGN 793 #define ARCH_MIN_TASKALIGN 0 794 #endif 795 int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); 796 unsigned long useroffset, usersize; 797 798 /* create a slab on which task_structs can be allocated */ 799 task_struct_whitelist(&useroffset, &usersize); 800 task_struct_cachep = kmem_cache_create_usercopy("task_struct", 801 arch_task_struct_size, align, 802 SLAB_PANIC|SLAB_ACCOUNT, 803 useroffset, usersize, NULL); 804 #endif 805 806 /* do the arch specific task caches init */ 807 arch_task_cache_init(); 808 809 set_max_threads(MAX_THREADS); 810 811 init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; 812 init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; 813 init_task.signal->rlim[RLIMIT_SIGPENDING] = 814 init_task.signal->rlim[RLIMIT_NPROC]; 815 816 for (i = 0; i < UCOUNT_COUNTS; i++) { 817 init_user_ns.ucount_max[i] = max_threads/2; 818 } 819 820 #ifdef CONFIG_VMAP_STACK 821 cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", 822 NULL, free_vm_stack_cache); 823 #endif 824 825 lockdep_init_task(&init_task); 826 uprobes_init(); 827 } 828 829 int __weak arch_dup_task_struct(struct task_struct *dst, 830 struct task_struct *src) 831 { 832 *dst = *src; 833 return 0; 834 } 835 836 void set_task_stack_end_magic(struct task_struct *tsk) 837 { 838 unsigned long *stackend; 839 840 stackend = end_of_stack(tsk); 841 *stackend = STACK_END_MAGIC; /* for overflow detection */ 842 } 843 844 static struct task_struct *dup_task_struct(struct task_struct *orig, int node) 845 { 846 struct task_struct *tsk; 847 unsigned long *stack; 848 struct vm_struct *stack_vm_area __maybe_unused; 849 int err; 850 851 if (node == NUMA_NO_NODE) 852 node = tsk_fork_get_node(orig); 853 tsk = alloc_task_struct_node(node); 854 if (!tsk) 855 return NULL; 856 857 stack = alloc_thread_stack_node(tsk, node); 858 if (!stack) 859 goto free_tsk; 860 861 if (memcg_charge_kernel_stack(tsk)) 862 goto free_stack; 863 864 stack_vm_area = task_stack_vm_area(tsk); 865 866 err = arch_dup_task_struct(tsk, orig); 867 868 /* 869 * arch_dup_task_struct() clobbers the stack-related fields. Make 870 * sure they're properly initialized before using any stack-related 871 * functions again. 872 */ 873 tsk->stack = stack; 874 #ifdef CONFIG_VMAP_STACK 875 tsk->stack_vm_area = stack_vm_area; 876 #endif 877 #ifdef CONFIG_THREAD_INFO_IN_TASK 878 refcount_set(&tsk->stack_refcount, 1); 879 #endif 880 881 if (err) 882 goto free_stack; 883 884 #ifdef CONFIG_SECCOMP 885 /* 886 * We must handle setting up seccomp filters once we're under 887 * the sighand lock in case orig has changed between now and 888 * then. Until then, filter must be NULL to avoid messing up 889 * the usage counts on the error path calling free_task. 890 */ 891 tsk->seccomp.filter = NULL; 892 #endif 893 894 setup_thread_stack(tsk, orig); 895 clear_user_return_notifier(tsk); 896 clear_tsk_need_resched(tsk); 897 set_task_stack_end_magic(tsk); 898 899 #ifdef CONFIG_STACKPROTECTOR 900 tsk->stack_canary = get_random_canary(); 901 #endif 902 if (orig->cpus_ptr == &orig->cpus_mask) 903 tsk->cpus_ptr = &tsk->cpus_mask; 904 905 /* One for the user space visible state that goes away when reaped. */ 906 refcount_set(&tsk->rcu_users, 1); 907 /* One for the rcu users, and one for the scheduler */ 908 refcount_set(&tsk->usage, 2); 909 #ifdef CONFIG_BLK_DEV_IO_TRACE 910 tsk->btrace_seq = 0; 911 #endif 912 tsk->splice_pipe = NULL; 913 tsk->task_frag.page = NULL; 914 tsk->wake_q.next = NULL; 915 916 account_kernel_stack(tsk, 1); 917 918 kcov_task_init(tsk); 919 920 #ifdef CONFIG_FAULT_INJECTION 921 tsk->fail_nth = 0; 922 #endif 923 924 #ifdef CONFIG_BLK_CGROUP 925 tsk->throttle_queue = NULL; 926 tsk->use_memdelay = 0; 927 #endif 928 929 #ifdef CONFIG_MEMCG 930 tsk->active_memcg = NULL; 931 #endif 932 return tsk; 933 934 free_stack: 935 free_thread_stack(tsk); 936 free_tsk: 937 free_task_struct(tsk); 938 return NULL; 939 } 940 941 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); 942 943 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; 944 945 static int __init coredump_filter_setup(char *s) 946 { 947 default_dump_filter = 948 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & 949 MMF_DUMP_FILTER_MASK; 950 return 1; 951 } 952 953 __setup("coredump_filter=", coredump_filter_setup); 954 955 #include <linux/init_task.h> 956 957 static void mm_init_aio(struct mm_struct *mm) 958 { 959 #ifdef CONFIG_AIO 960 spin_lock_init(&mm->ioctx_lock); 961 mm->ioctx_table = NULL; 962 #endif 963 } 964 965 static __always_inline void mm_clear_owner(struct mm_struct *mm, 966 struct task_struct *p) 967 { 968 #ifdef CONFIG_MEMCG 969 if (mm->owner == p) 970 WRITE_ONCE(mm->owner, NULL); 971 #endif 972 } 973 974 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) 975 { 976 #ifdef CONFIG_MEMCG 977 mm->owner = p; 978 #endif 979 } 980 981 static void mm_init_uprobes_state(struct mm_struct *mm) 982 { 983 #ifdef CONFIG_UPROBES 984 mm->uprobes_state.xol_area = NULL; 985 #endif 986 } 987 988 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, 989 struct user_namespace *user_ns) 990 { 991 mm->mmap = NULL; 992 mm->mm_rb = RB_ROOT; 993 mm->vmacache_seqnum = 0; 994 atomic_set(&mm->mm_users, 1); 995 atomic_set(&mm->mm_count, 1); 996 init_rwsem(&mm->mmap_sem); 997 INIT_LIST_HEAD(&mm->mmlist); 998 mm->core_state = NULL; 999 mm_pgtables_bytes_init(mm); 1000 mm->map_count = 0; 1001 mm->locked_vm = 0; 1002 atomic64_set(&mm->pinned_vm, 0); 1003 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); 1004 spin_lock_init(&mm->page_table_lock); 1005 spin_lock_init(&mm->arg_lock); 1006 mm_init_cpumask(mm); 1007 mm_init_aio(mm); 1008 mm_init_owner(mm, p); 1009 RCU_INIT_POINTER(mm->exe_file, NULL); 1010 mmu_notifier_mm_init(mm); 1011 hmm_mm_init(mm); 1012 init_tlb_flush_pending(mm); 1013 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 1014 mm->pmd_huge_pte = NULL; 1015 #endif 1016 mm_init_uprobes_state(mm); 1017 1018 if (current->mm) { 1019 mm->flags = current->mm->flags & MMF_INIT_MASK; 1020 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; 1021 } else { 1022 mm->flags = default_dump_filter; 1023 mm->def_flags = 0; 1024 } 1025 1026 if (mm_alloc_pgd(mm)) 1027 goto fail_nopgd; 1028 1029 if (init_new_context(p, mm)) 1030 goto fail_nocontext; 1031 1032 mm->user_ns = get_user_ns(user_ns); 1033 return mm; 1034 1035 fail_nocontext: 1036 mm_free_pgd(mm); 1037 fail_nopgd: 1038 free_mm(mm); 1039 return NULL; 1040 } 1041 1042 /* 1043 * Allocate and initialize an mm_struct. 1044 */ 1045 struct mm_struct *mm_alloc(void) 1046 { 1047 struct mm_struct *mm; 1048 1049 mm = allocate_mm(); 1050 if (!mm) 1051 return NULL; 1052 1053 memset(mm, 0, sizeof(*mm)); 1054 return mm_init(mm, current, current_user_ns()); 1055 } 1056 1057 static inline void __mmput(struct mm_struct *mm) 1058 { 1059 VM_BUG_ON(atomic_read(&mm->mm_users)); 1060 1061 uprobe_clear_state(mm); 1062 exit_aio(mm); 1063 ksm_exit(mm); 1064 khugepaged_exit(mm); /* must run before exit_mmap */ 1065 exit_mmap(mm); 1066 mm_put_huge_zero_page(mm); 1067 set_mm_exe_file(mm, NULL); 1068 if (!list_empty(&mm->mmlist)) { 1069 spin_lock(&mmlist_lock); 1070 list_del(&mm->mmlist); 1071 spin_unlock(&mmlist_lock); 1072 } 1073 if (mm->binfmt) 1074 module_put(mm->binfmt->module); 1075 mmdrop(mm); 1076 } 1077 1078 /* 1079 * Decrement the use count and release all resources for an mm. 1080 */ 1081 void mmput(struct mm_struct *mm) 1082 { 1083 might_sleep(); 1084 1085 if (atomic_dec_and_test(&mm->mm_users)) 1086 __mmput(mm); 1087 } 1088 EXPORT_SYMBOL_GPL(mmput); 1089 1090 #ifdef CONFIG_MMU 1091 static void mmput_async_fn(struct work_struct *work) 1092 { 1093 struct mm_struct *mm = container_of(work, struct mm_struct, 1094 async_put_work); 1095 1096 __mmput(mm); 1097 } 1098 1099 void mmput_async(struct mm_struct *mm) 1100 { 1101 if (atomic_dec_and_test(&mm->mm_users)) { 1102 INIT_WORK(&mm->async_put_work, mmput_async_fn); 1103 schedule_work(&mm->async_put_work); 1104 } 1105 } 1106 #endif 1107 1108 /** 1109 * set_mm_exe_file - change a reference to the mm's executable file 1110 * 1111 * This changes mm's executable file (shown as symlink /proc/[pid]/exe). 1112 * 1113 * Main users are mmput() and sys_execve(). Callers prevent concurrent 1114 * invocations: in mmput() nobody alive left, in execve task is single 1115 * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the 1116 * mm->exe_file, but does so without using set_mm_exe_file() in order 1117 * to do avoid the need for any locks. 1118 */ 1119 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) 1120 { 1121 struct file *old_exe_file; 1122 1123 /* 1124 * It is safe to dereference the exe_file without RCU as 1125 * this function is only called if nobody else can access 1126 * this mm -- see comment above for justification. 1127 */ 1128 old_exe_file = rcu_dereference_raw(mm->exe_file); 1129 1130 if (new_exe_file) 1131 get_file(new_exe_file); 1132 rcu_assign_pointer(mm->exe_file, new_exe_file); 1133 if (old_exe_file) 1134 fput(old_exe_file); 1135 } 1136 1137 /** 1138 * get_mm_exe_file - acquire a reference to the mm's executable file 1139 * 1140 * Returns %NULL if mm has no associated executable file. 1141 * User must release file via fput(). 1142 */ 1143 struct file *get_mm_exe_file(struct mm_struct *mm) 1144 { 1145 struct file *exe_file; 1146 1147 rcu_read_lock(); 1148 exe_file = rcu_dereference(mm->exe_file); 1149 if (exe_file && !get_file_rcu(exe_file)) 1150 exe_file = NULL; 1151 rcu_read_unlock(); 1152 return exe_file; 1153 } 1154 EXPORT_SYMBOL(get_mm_exe_file); 1155 1156 /** 1157 * get_task_exe_file - acquire a reference to the task's executable file 1158 * 1159 * Returns %NULL if task's mm (if any) has no associated executable file or 1160 * this is a kernel thread with borrowed mm (see the comment above get_task_mm). 1161 * User must release file via fput(). 1162 */ 1163 struct file *get_task_exe_file(struct task_struct *task) 1164 { 1165 struct file *exe_file = NULL; 1166 struct mm_struct *mm; 1167 1168 task_lock(task); 1169 mm = task->mm; 1170 if (mm) { 1171 if (!(task->flags & PF_KTHREAD)) 1172 exe_file = get_mm_exe_file(mm); 1173 } 1174 task_unlock(task); 1175 return exe_file; 1176 } 1177 EXPORT_SYMBOL(get_task_exe_file); 1178 1179 /** 1180 * get_task_mm - acquire a reference to the task's mm 1181 * 1182 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning 1183 * this kernel workthread has transiently adopted a user mm with use_mm, 1184 * to do its AIO) is not set and if so returns a reference to it, after 1185 * bumping up the use count. User must release the mm via mmput() 1186 * after use. Typically used by /proc and ptrace. 1187 */ 1188 struct mm_struct *get_task_mm(struct task_struct *task) 1189 { 1190 struct mm_struct *mm; 1191 1192 task_lock(task); 1193 mm = task->mm; 1194 if (mm) { 1195 if (task->flags & PF_KTHREAD) 1196 mm = NULL; 1197 else 1198 mmget(mm); 1199 } 1200 task_unlock(task); 1201 return mm; 1202 } 1203 EXPORT_SYMBOL_GPL(get_task_mm); 1204 1205 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) 1206 { 1207 struct mm_struct *mm; 1208 int err; 1209 1210 err = mutex_lock_killable(&task->signal->cred_guard_mutex); 1211 if (err) 1212 return ERR_PTR(err); 1213 1214 mm = get_task_mm(task); 1215 if (mm && mm != current->mm && 1216 !ptrace_may_access(task, mode)) { 1217 mmput(mm); 1218 mm = ERR_PTR(-EACCES); 1219 } 1220 mutex_unlock(&task->signal->cred_guard_mutex); 1221 1222 return mm; 1223 } 1224 1225 static void complete_vfork_done(struct task_struct *tsk) 1226 { 1227 struct completion *vfork; 1228 1229 task_lock(tsk); 1230 vfork = tsk->vfork_done; 1231 if (likely(vfork)) { 1232 tsk->vfork_done = NULL; 1233 complete(vfork); 1234 } 1235 task_unlock(tsk); 1236 } 1237 1238 static int wait_for_vfork_done(struct task_struct *child, 1239 struct completion *vfork) 1240 { 1241 int killed; 1242 1243 freezer_do_not_count(); 1244 cgroup_enter_frozen(); 1245 killed = wait_for_completion_killable(vfork); 1246 cgroup_leave_frozen(false); 1247 freezer_count(); 1248 1249 if (killed) { 1250 task_lock(child); 1251 child->vfork_done = NULL; 1252 task_unlock(child); 1253 } 1254 1255 put_task_struct(child); 1256 return killed; 1257 } 1258 1259 /* Please note the differences between mmput and mm_release. 1260 * mmput is called whenever we stop holding onto a mm_struct, 1261 * error success whatever. 1262 * 1263 * mm_release is called after a mm_struct has been removed 1264 * from the current process. 1265 * 1266 * This difference is important for error handling, when we 1267 * only half set up a mm_struct for a new process and need to restore 1268 * the old one. Because we mmput the new mm_struct before 1269 * restoring the old one. . . 1270 * Eric Biederman 10 January 1998 1271 */ 1272 void mm_release(struct task_struct *tsk, struct mm_struct *mm) 1273 { 1274 /* Get rid of any futexes when releasing the mm */ 1275 #ifdef CONFIG_FUTEX 1276 if (unlikely(tsk->robust_list)) { 1277 exit_robust_list(tsk); 1278 tsk->robust_list = NULL; 1279 } 1280 #ifdef CONFIG_COMPAT 1281 if (unlikely(tsk->compat_robust_list)) { 1282 compat_exit_robust_list(tsk); 1283 tsk->compat_robust_list = NULL; 1284 } 1285 #endif 1286 if (unlikely(!list_empty(&tsk->pi_state_list))) 1287 exit_pi_state_list(tsk); 1288 #endif 1289 1290 uprobe_free_utask(tsk); 1291 1292 /* Get rid of any cached register state */ 1293 deactivate_mm(tsk, mm); 1294 1295 /* 1296 * Signal userspace if we're not exiting with a core dump 1297 * because we want to leave the value intact for debugging 1298 * purposes. 1299 */ 1300 if (tsk->clear_child_tid) { 1301 if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && 1302 atomic_read(&mm->mm_users) > 1) { 1303 /* 1304 * We don't check the error code - if userspace has 1305 * not set up a proper pointer then tough luck. 1306 */ 1307 put_user(0, tsk->clear_child_tid); 1308 do_futex(tsk->clear_child_tid, FUTEX_WAKE, 1309 1, NULL, NULL, 0, 0); 1310 } 1311 tsk->clear_child_tid = NULL; 1312 } 1313 1314 /* 1315 * All done, finally we can wake up parent and return this mm to him. 1316 * Also kthread_stop() uses this completion for synchronization. 1317 */ 1318 if (tsk->vfork_done) 1319 complete_vfork_done(tsk); 1320 } 1321 1322 /** 1323 * dup_mm() - duplicates an existing mm structure 1324 * @tsk: the task_struct with which the new mm will be associated. 1325 * @oldmm: the mm to duplicate. 1326 * 1327 * Allocates a new mm structure and duplicates the provided @oldmm structure 1328 * content into it. 1329 * 1330 * Return: the duplicated mm or NULL on failure. 1331 */ 1332 static struct mm_struct *dup_mm(struct task_struct *tsk, 1333 struct mm_struct *oldmm) 1334 { 1335 struct mm_struct *mm; 1336 int err; 1337 1338 mm = allocate_mm(); 1339 if (!mm) 1340 goto fail_nomem; 1341 1342 memcpy(mm, oldmm, sizeof(*mm)); 1343 1344 if (!mm_init(mm, tsk, mm->user_ns)) 1345 goto fail_nomem; 1346 1347 err = dup_mmap(mm, oldmm); 1348 if (err) 1349 goto free_pt; 1350 1351 mm->hiwater_rss = get_mm_rss(mm); 1352 mm->hiwater_vm = mm->total_vm; 1353 1354 if (mm->binfmt && !try_module_get(mm->binfmt->module)) 1355 goto free_pt; 1356 1357 return mm; 1358 1359 free_pt: 1360 /* don't put binfmt in mmput, we haven't got module yet */ 1361 mm->binfmt = NULL; 1362 mm_init_owner(mm, NULL); 1363 mmput(mm); 1364 1365 fail_nomem: 1366 return NULL; 1367 } 1368 1369 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) 1370 { 1371 struct mm_struct *mm, *oldmm; 1372 int retval; 1373 1374 tsk->min_flt = tsk->maj_flt = 0; 1375 tsk->nvcsw = tsk->nivcsw = 0; 1376 #ifdef CONFIG_DETECT_HUNG_TASK 1377 tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; 1378 tsk->last_switch_time = 0; 1379 #endif 1380 1381 tsk->mm = NULL; 1382 tsk->active_mm = NULL; 1383 1384 /* 1385 * Are we cloning a kernel thread? 1386 * 1387 * We need to steal a active VM for that.. 1388 */ 1389 oldmm = current->mm; 1390 if (!oldmm) 1391 return 0; 1392 1393 /* initialize the new vmacache entries */ 1394 vmacache_flush(tsk); 1395 1396 if (clone_flags & CLONE_VM) { 1397 mmget(oldmm); 1398 mm = oldmm; 1399 goto good_mm; 1400 } 1401 1402 retval = -ENOMEM; 1403 mm = dup_mm(tsk, current->mm); 1404 if (!mm) 1405 goto fail_nomem; 1406 1407 good_mm: 1408 tsk->mm = mm; 1409 tsk->active_mm = mm; 1410 return 0; 1411 1412 fail_nomem: 1413 return retval; 1414 } 1415 1416 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) 1417 { 1418 struct fs_struct *fs = current->fs; 1419 if (clone_flags & CLONE_FS) { 1420 /* tsk->fs is already what we want */ 1421 spin_lock(&fs->lock); 1422 if (fs->in_exec) { 1423 spin_unlock(&fs->lock); 1424 return -EAGAIN; 1425 } 1426 fs->users++; 1427 spin_unlock(&fs->lock); 1428 return 0; 1429 } 1430 tsk->fs = copy_fs_struct(fs); 1431 if (!tsk->fs) 1432 return -ENOMEM; 1433 return 0; 1434 } 1435 1436 static int copy_files(unsigned long clone_flags, struct task_struct *tsk) 1437 { 1438 struct files_struct *oldf, *newf; 1439 int error = 0; 1440 1441 /* 1442 * A background process may not have any files ... 1443 */ 1444 oldf = current->files; 1445 if (!oldf) 1446 goto out; 1447 1448 if (clone_flags & CLONE_FILES) { 1449 atomic_inc(&oldf->count); 1450 goto out; 1451 } 1452 1453 newf = dup_fd(oldf, &error); 1454 if (!newf) 1455 goto out; 1456 1457 tsk->files = newf; 1458 error = 0; 1459 out: 1460 return error; 1461 } 1462 1463 static int copy_io(unsigned long clone_flags, struct task_struct *tsk) 1464 { 1465 #ifdef CONFIG_BLOCK 1466 struct io_context *ioc = current->io_context; 1467 struct io_context *new_ioc; 1468 1469 if (!ioc) 1470 return 0; 1471 /* 1472 * Share io context with parent, if CLONE_IO is set 1473 */ 1474 if (clone_flags & CLONE_IO) { 1475 ioc_task_link(ioc); 1476 tsk->io_context = ioc; 1477 } else if (ioprio_valid(ioc->ioprio)) { 1478 new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE); 1479 if (unlikely(!new_ioc)) 1480 return -ENOMEM; 1481 1482 new_ioc->ioprio = ioc->ioprio; 1483 put_io_context(new_ioc); 1484 } 1485 #endif 1486 return 0; 1487 } 1488 1489 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) 1490 { 1491 struct sighand_struct *sig; 1492 1493 if (clone_flags & CLONE_SIGHAND) { 1494 refcount_inc(¤t->sighand->count); 1495 return 0; 1496 } 1497 sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); 1498 rcu_assign_pointer(tsk->sighand, sig); 1499 if (!sig) 1500 return -ENOMEM; 1501 1502 refcount_set(&sig->count, 1); 1503 spin_lock_irq(¤t->sighand->siglock); 1504 memcpy(sig->action, current->sighand->action, sizeof(sig->action)); 1505 spin_unlock_irq(¤t->sighand->siglock); 1506 return 0; 1507 } 1508 1509 void __cleanup_sighand(struct sighand_struct *sighand) 1510 { 1511 if (refcount_dec_and_test(&sighand->count)) { 1512 signalfd_cleanup(sighand); 1513 /* 1514 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it 1515 * without an RCU grace period, see __lock_task_sighand(). 1516 */ 1517 kmem_cache_free(sighand_cachep, sighand); 1518 } 1519 } 1520 1521 #ifdef CONFIG_POSIX_TIMERS 1522 /* 1523 * Initialize POSIX timer handling for a thread group. 1524 */ 1525 static void posix_cpu_timers_init_group(struct signal_struct *sig) 1526 { 1527 unsigned long cpu_limit; 1528 1529 cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); 1530 if (cpu_limit != RLIM_INFINITY) { 1531 sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC; 1532 sig->cputimer.running = true; 1533 } 1534 1535 /* The timer lists. */ 1536 INIT_LIST_HEAD(&sig->cpu_timers[0]); 1537 INIT_LIST_HEAD(&sig->cpu_timers[1]); 1538 INIT_LIST_HEAD(&sig->cpu_timers[2]); 1539 } 1540 #else 1541 static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { } 1542 #endif 1543 1544 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) 1545 { 1546 struct signal_struct *sig; 1547 1548 if (clone_flags & CLONE_THREAD) 1549 return 0; 1550 1551 sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); 1552 tsk->signal = sig; 1553 if (!sig) 1554 return -ENOMEM; 1555 1556 sig->nr_threads = 1; 1557 atomic_set(&sig->live, 1); 1558 refcount_set(&sig->sigcnt, 1); 1559 1560 /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ 1561 sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); 1562 tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); 1563 1564 init_waitqueue_head(&sig->wait_chldexit); 1565 sig->curr_target = tsk; 1566 init_sigpending(&sig->shared_pending); 1567 INIT_HLIST_HEAD(&sig->multiprocess); 1568 seqlock_init(&sig->stats_lock); 1569 prev_cputime_init(&sig->prev_cputime); 1570 1571 #ifdef CONFIG_POSIX_TIMERS 1572 INIT_LIST_HEAD(&sig->posix_timers); 1573 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1574 sig->real_timer.function = it_real_fn; 1575 #endif 1576 1577 task_lock(current->group_leader); 1578 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); 1579 task_unlock(current->group_leader); 1580 1581 posix_cpu_timers_init_group(sig); 1582 1583 tty_audit_fork(sig); 1584 sched_autogroup_fork(sig); 1585 1586 sig->oom_score_adj = current->signal->oom_score_adj; 1587 sig->oom_score_adj_min = current->signal->oom_score_adj_min; 1588 1589 mutex_init(&sig->cred_guard_mutex); 1590 1591 return 0; 1592 } 1593 1594 static void copy_seccomp(struct task_struct *p) 1595 { 1596 #ifdef CONFIG_SECCOMP 1597 /* 1598 * Must be called with sighand->lock held, which is common to 1599 * all threads in the group. Holding cred_guard_mutex is not 1600 * needed because this new task is not yet running and cannot 1601 * be racing exec. 1602 */ 1603 assert_spin_locked(¤t->sighand->siglock); 1604 1605 /* Ref-count the new filter user, and assign it. */ 1606 get_seccomp_filter(current); 1607 p->seccomp = current->seccomp; 1608 1609 /* 1610 * Explicitly enable no_new_privs here in case it got set 1611 * between the task_struct being duplicated and holding the 1612 * sighand lock. The seccomp state and nnp must be in sync. 1613 */ 1614 if (task_no_new_privs(current)) 1615 task_set_no_new_privs(p); 1616 1617 /* 1618 * If the parent gained a seccomp mode after copying thread 1619 * flags and between before we held the sighand lock, we have 1620 * to manually enable the seccomp thread flag here. 1621 */ 1622 if (p->seccomp.mode != SECCOMP_MODE_DISABLED) 1623 set_tsk_thread_flag(p, TIF_SECCOMP); 1624 #endif 1625 } 1626 1627 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) 1628 { 1629 current->clear_child_tid = tidptr; 1630 1631 return task_pid_vnr(current); 1632 } 1633 1634 static void rt_mutex_init_task(struct task_struct *p) 1635 { 1636 raw_spin_lock_init(&p->pi_lock); 1637 #ifdef CONFIG_RT_MUTEXES 1638 p->pi_waiters = RB_ROOT_CACHED; 1639 p->pi_top_task = NULL; 1640 p->pi_blocked_on = NULL; 1641 #endif 1642 } 1643 1644 #ifdef CONFIG_POSIX_TIMERS 1645 /* 1646 * Initialize POSIX timer handling for a single task. 1647 */ 1648 static void posix_cpu_timers_init(struct task_struct *tsk) 1649 { 1650 tsk->cputime_expires.prof_exp = 0; 1651 tsk->cputime_expires.virt_exp = 0; 1652 tsk->cputime_expires.sched_exp = 0; 1653 INIT_LIST_HEAD(&tsk->cpu_timers[0]); 1654 INIT_LIST_HEAD(&tsk->cpu_timers[1]); 1655 INIT_LIST_HEAD(&tsk->cpu_timers[2]); 1656 } 1657 #else 1658 static inline void posix_cpu_timers_init(struct task_struct *tsk) { } 1659 #endif 1660 1661 static inline void init_task_pid_links(struct task_struct *task) 1662 { 1663 enum pid_type type; 1664 1665 for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { 1666 INIT_HLIST_NODE(&task->pid_links[type]); 1667 } 1668 } 1669 1670 static inline void 1671 init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) 1672 { 1673 if (type == PIDTYPE_PID) 1674 task->thread_pid = pid; 1675 else 1676 task->signal->pids[type] = pid; 1677 } 1678 1679 static inline void rcu_copy_process(struct task_struct *p) 1680 { 1681 #ifdef CONFIG_PREEMPT_RCU 1682 p->rcu_read_lock_nesting = 0; 1683 p->rcu_read_unlock_special.s = 0; 1684 p->rcu_blocked_node = NULL; 1685 INIT_LIST_HEAD(&p->rcu_node_entry); 1686 #endif /* #ifdef CONFIG_PREEMPT_RCU */ 1687 #ifdef CONFIG_TASKS_RCU 1688 p->rcu_tasks_holdout = false; 1689 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); 1690 p->rcu_tasks_idle_cpu = -1; 1691 #endif /* #ifdef CONFIG_TASKS_RCU */ 1692 } 1693 1694 struct pid *pidfd_pid(const struct file *file) 1695 { 1696 if (file->f_op == &pidfd_fops) 1697 return file->private_data; 1698 1699 return ERR_PTR(-EBADF); 1700 } 1701 1702 static int pidfd_release(struct inode *inode, struct file *file) 1703 { 1704 struct pid *pid = file->private_data; 1705 1706 file->private_data = NULL; 1707 put_pid(pid); 1708 return 0; 1709 } 1710 1711 #ifdef CONFIG_PROC_FS 1712 static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) 1713 { 1714 struct pid_namespace *ns = proc_pid_ns(file_inode(m->file)); 1715 struct pid *pid = f->private_data; 1716 1717 seq_put_decimal_ull(m, "Pid:\t", pid_nr_ns(pid, ns)); 1718 seq_putc(m, '\n'); 1719 } 1720 #endif 1721 1722 /* 1723 * Poll support for process exit notification. 1724 */ 1725 static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts) 1726 { 1727 struct task_struct *task; 1728 struct pid *pid = file->private_data; 1729 int poll_flags = 0; 1730 1731 poll_wait(file, &pid->wait_pidfd, pts); 1732 1733 rcu_read_lock(); 1734 task = pid_task(pid, PIDTYPE_PID); 1735 /* 1736 * Inform pollers only when the whole thread group exits. 1737 * If the thread group leader exits before all other threads in the 1738 * group, then poll(2) should block, similar to the wait(2) family. 1739 */ 1740 if (!task || (task->exit_state && thread_group_empty(task))) 1741 poll_flags = POLLIN | POLLRDNORM; 1742 rcu_read_unlock(); 1743 1744 return poll_flags; 1745 } 1746 1747 const struct file_operations pidfd_fops = { 1748 .release = pidfd_release, 1749 .poll = pidfd_poll, 1750 #ifdef CONFIG_PROC_FS 1751 .show_fdinfo = pidfd_show_fdinfo, 1752 #endif 1753 }; 1754 1755 static void __delayed_free_task(struct rcu_head *rhp) 1756 { 1757 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 1758 1759 free_task(tsk); 1760 } 1761 1762 static __always_inline void delayed_free_task(struct task_struct *tsk) 1763 { 1764 if (IS_ENABLED(CONFIG_MEMCG)) 1765 call_rcu(&tsk->rcu, __delayed_free_task); 1766 else 1767 free_task(tsk); 1768 } 1769 1770 /* 1771 * This creates a new process as a copy of the old one, 1772 * but does not actually start it yet. 1773 * 1774 * It copies the registers, and all the appropriate 1775 * parts of the process environment (as per the clone 1776 * flags). The actual kick-off is left to the caller. 1777 */ 1778 static __latent_entropy struct task_struct *copy_process( 1779 struct pid *pid, 1780 int trace, 1781 int node, 1782 struct kernel_clone_args *args) 1783 { 1784 int pidfd = -1, retval; 1785 struct task_struct *p; 1786 struct multiprocess_signals delayed; 1787 struct file *pidfile = NULL; 1788 u64 clone_flags = args->flags; 1789 1790 /* 1791 * Don't allow sharing the root directory with processes in a different 1792 * namespace 1793 */ 1794 if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) 1795 return ERR_PTR(-EINVAL); 1796 1797 if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) 1798 return ERR_PTR(-EINVAL); 1799 1800 /* 1801 * Thread groups must share signals as well, and detached threads 1802 * can only be started up within the thread group. 1803 */ 1804 if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) 1805 return ERR_PTR(-EINVAL); 1806 1807 /* 1808 * Shared signal handlers imply shared VM. By way of the above, 1809 * thread groups also imply shared VM. Blocking this case allows 1810 * for various simplifications in other code. 1811 */ 1812 if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) 1813 return ERR_PTR(-EINVAL); 1814 1815 /* 1816 * Siblings of global init remain as zombies on exit since they are 1817 * not reaped by their parent (swapper). To solve this and to avoid 1818 * multi-rooted process trees, prevent global and container-inits 1819 * from creating siblings. 1820 */ 1821 if ((clone_flags & CLONE_PARENT) && 1822 current->signal->flags & SIGNAL_UNKILLABLE) 1823 return ERR_PTR(-EINVAL); 1824 1825 /* 1826 * If the new process will be in a different pid or user namespace 1827 * do not allow it to share a thread group with the forking task. 1828 */ 1829 if (clone_flags & CLONE_THREAD) { 1830 if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || 1831 (task_active_pid_ns(current) != 1832 current->nsproxy->pid_ns_for_children)) 1833 return ERR_PTR(-EINVAL); 1834 } 1835 1836 if (clone_flags & CLONE_PIDFD) { 1837 /* 1838 * - CLONE_DETACHED is blocked so that we can potentially 1839 * reuse it later for CLONE_PIDFD. 1840 * - CLONE_THREAD is blocked until someone really needs it. 1841 */ 1842 if (clone_flags & (CLONE_DETACHED | CLONE_THREAD)) 1843 return ERR_PTR(-EINVAL); 1844 } 1845 1846 /* 1847 * Force any signals received before this point to be delivered 1848 * before the fork happens. Collect up signals sent to multiple 1849 * processes that happen during the fork and delay them so that 1850 * they appear to happen after the fork. 1851 */ 1852 sigemptyset(&delayed.signal); 1853 INIT_HLIST_NODE(&delayed.node); 1854 1855 spin_lock_irq(¤t->sighand->siglock); 1856 if (!(clone_flags & CLONE_THREAD)) 1857 hlist_add_head(&delayed.node, ¤t->signal->multiprocess); 1858 recalc_sigpending(); 1859 spin_unlock_irq(¤t->sighand->siglock); 1860 retval = -ERESTARTNOINTR; 1861 if (signal_pending(current)) 1862 goto fork_out; 1863 1864 retval = -ENOMEM; 1865 p = dup_task_struct(current, node); 1866 if (!p) 1867 goto fork_out; 1868 1869 /* 1870 * This _must_ happen before we call free_task(), i.e. before we jump 1871 * to any of the bad_fork_* labels. This is to avoid freeing 1872 * p->set_child_tid which is (ab)used as a kthread's data pointer for 1873 * kernel threads (PF_KTHREAD). 1874 */ 1875 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL; 1876 /* 1877 * Clear TID on mm_release()? 1878 */ 1879 p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL; 1880 1881 ftrace_graph_init_task(p); 1882 1883 rt_mutex_init_task(p); 1884 1885 #ifdef CONFIG_PROVE_LOCKING 1886 DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); 1887 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 1888 #endif 1889 retval = -EAGAIN; 1890 if (atomic_read(&p->real_cred->user->processes) >= 1891 task_rlimit(p, RLIMIT_NPROC)) { 1892 if (p->real_cred->user != INIT_USER && 1893 !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) 1894 goto bad_fork_free; 1895 } 1896 current->flags &= ~PF_NPROC_EXCEEDED; 1897 1898 retval = copy_creds(p, clone_flags); 1899 if (retval < 0) 1900 goto bad_fork_free; 1901 1902 /* 1903 * If multiple threads are within copy_process(), then this check 1904 * triggers too late. This doesn't hurt, the check is only there 1905 * to stop root fork bombs. 1906 */ 1907 retval = -EAGAIN; 1908 if (nr_threads >= max_threads) 1909 goto bad_fork_cleanup_count; 1910 1911 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 1912 p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE); 1913 p->flags |= PF_FORKNOEXEC; 1914 INIT_LIST_HEAD(&p->children); 1915 INIT_LIST_HEAD(&p->sibling); 1916 rcu_copy_process(p); 1917 p->vfork_done = NULL; 1918 spin_lock_init(&p->alloc_lock); 1919 1920 init_sigpending(&p->pending); 1921 1922 p->utime = p->stime = p->gtime = 0; 1923 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 1924 p->utimescaled = p->stimescaled = 0; 1925 #endif 1926 prev_cputime_init(&p->prev_cputime); 1927 1928 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1929 seqcount_init(&p->vtime.seqcount); 1930 p->vtime.starttime = 0; 1931 p->vtime.state = VTIME_INACTIVE; 1932 #endif 1933 1934 #if defined(SPLIT_RSS_COUNTING) 1935 memset(&p->rss_stat, 0, sizeof(p->rss_stat)); 1936 #endif 1937 1938 p->default_timer_slack_ns = current->timer_slack_ns; 1939 1940 #ifdef CONFIG_PSI 1941 p->psi_flags = 0; 1942 #endif 1943 1944 task_io_accounting_init(&p->ioac); 1945 acct_clear_integrals(p); 1946 1947 posix_cpu_timers_init(p); 1948 1949 p->io_context = NULL; 1950 audit_set_context(p, NULL); 1951 cgroup_fork(p); 1952 #ifdef CONFIG_NUMA 1953 p->mempolicy = mpol_dup(p->mempolicy); 1954 if (IS_ERR(p->mempolicy)) { 1955 retval = PTR_ERR(p->mempolicy); 1956 p->mempolicy = NULL; 1957 goto bad_fork_cleanup_threadgroup_lock; 1958 } 1959 #endif 1960 #ifdef CONFIG_CPUSETS 1961 p->cpuset_mem_spread_rotor = NUMA_NO_NODE; 1962 p->cpuset_slab_spread_rotor = NUMA_NO_NODE; 1963 seqcount_init(&p->mems_allowed_seq); 1964 #endif 1965 #ifdef CONFIG_TRACE_IRQFLAGS 1966 p->irq_events = 0; 1967 p->hardirqs_enabled = 0; 1968 p->hardirq_enable_ip = 0; 1969 p->hardirq_enable_event = 0; 1970 p->hardirq_disable_ip = _THIS_IP_; 1971 p->hardirq_disable_event = 0; 1972 p->softirqs_enabled = 1; 1973 p->softirq_enable_ip = _THIS_IP_; 1974 p->softirq_enable_event = 0; 1975 p->softirq_disable_ip = 0; 1976 p->softirq_disable_event = 0; 1977 p->hardirq_context = 0; 1978 p->softirq_context = 0; 1979 #endif 1980 1981 p->pagefault_disabled = 0; 1982 1983 #ifdef CONFIG_LOCKDEP 1984 lockdep_init_task(p); 1985 #endif 1986 1987 #ifdef CONFIG_DEBUG_MUTEXES 1988 p->blocked_on = NULL; /* not blocked yet */ 1989 #endif 1990 #ifdef CONFIG_BCACHE 1991 p->sequential_io = 0; 1992 p->sequential_io_avg = 0; 1993 #endif 1994 1995 /* Perform scheduler related setup. Assign this task to a CPU. */ 1996 retval = sched_fork(clone_flags, p); 1997 if (retval) 1998 goto bad_fork_cleanup_policy; 1999 2000 retval = perf_event_init_task(p); 2001 if (retval) 2002 goto bad_fork_cleanup_policy; 2003 retval = audit_alloc(p); 2004 if (retval) 2005 goto bad_fork_cleanup_perf; 2006 /* copy all the process information */ 2007 shm_init_task(p); 2008 retval = security_task_alloc(p, clone_flags); 2009 if (retval) 2010 goto bad_fork_cleanup_audit; 2011 retval = copy_semundo(clone_flags, p); 2012 if (retval) 2013 goto bad_fork_cleanup_security; 2014 retval = copy_files(clone_flags, p); 2015 if (retval) 2016 goto bad_fork_cleanup_semundo; 2017 retval = copy_fs(clone_flags, p); 2018 if (retval) 2019 goto bad_fork_cleanup_files; 2020 retval = copy_sighand(clone_flags, p); 2021 if (retval) 2022 goto bad_fork_cleanup_fs; 2023 retval = copy_signal(clone_flags, p); 2024 if (retval) 2025 goto bad_fork_cleanup_sighand; 2026 retval = copy_mm(clone_flags, p); 2027 if (retval) 2028 goto bad_fork_cleanup_signal; 2029 retval = copy_namespaces(clone_flags, p); 2030 if (retval) 2031 goto bad_fork_cleanup_mm; 2032 retval = copy_io(clone_flags, p); 2033 if (retval) 2034 goto bad_fork_cleanup_namespaces; 2035 retval = copy_thread_tls(clone_flags, args->stack, args->stack_size, p, 2036 args->tls); 2037 if (retval) 2038 goto bad_fork_cleanup_io; 2039 2040 stackleak_task_init(p); 2041 2042 if (pid != &init_struct_pid) { 2043 pid = alloc_pid(p->nsproxy->pid_ns_for_children); 2044 if (IS_ERR(pid)) { 2045 retval = PTR_ERR(pid); 2046 goto bad_fork_cleanup_thread; 2047 } 2048 } 2049 2050 /* 2051 * This has to happen after we've potentially unshared the file 2052 * descriptor table (so that the pidfd doesn't leak into the child 2053 * if the fd table isn't shared). 2054 */ 2055 if (clone_flags & CLONE_PIDFD) { 2056 retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC); 2057 if (retval < 0) 2058 goto bad_fork_free_pid; 2059 2060 pidfd = retval; 2061 2062 pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid, 2063 O_RDWR | O_CLOEXEC); 2064 if (IS_ERR(pidfile)) { 2065 put_unused_fd(pidfd); 2066 retval = PTR_ERR(pidfile); 2067 goto bad_fork_free_pid; 2068 } 2069 get_pid(pid); /* held by pidfile now */ 2070 2071 retval = put_user(pidfd, args->pidfd); 2072 if (retval) 2073 goto bad_fork_put_pidfd; 2074 } 2075 2076 #ifdef CONFIG_BLOCK 2077 p->plug = NULL; 2078 #endif 2079 #ifdef CONFIG_FUTEX 2080 p->robust_list = NULL; 2081 #ifdef CONFIG_COMPAT 2082 p->compat_robust_list = NULL; 2083 #endif 2084 INIT_LIST_HEAD(&p->pi_state_list); 2085 p->pi_state_cache = NULL; 2086 #endif 2087 /* 2088 * sigaltstack should be cleared when sharing the same VM 2089 */ 2090 if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) 2091 sas_ss_reset(p); 2092 2093 /* 2094 * Syscall tracing and stepping should be turned off in the 2095 * child regardless of CLONE_PTRACE. 2096 */ 2097 user_disable_single_step(p); 2098 clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); 2099 #ifdef TIF_SYSCALL_EMU 2100 clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); 2101 #endif 2102 clear_tsk_latency_tracing(p); 2103 2104 /* ok, now we should be set up.. */ 2105 p->pid = pid_nr(pid); 2106 if (clone_flags & CLONE_THREAD) { 2107 p->exit_signal = -1; 2108 p->group_leader = current->group_leader; 2109 p->tgid = current->tgid; 2110 } else { 2111 if (clone_flags & CLONE_PARENT) 2112 p->exit_signal = current->group_leader->exit_signal; 2113 else 2114 p->exit_signal = args->exit_signal; 2115 p->group_leader = p; 2116 p->tgid = p->pid; 2117 } 2118 2119 p->nr_dirtied = 0; 2120 p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); 2121 p->dirty_paused_when = 0; 2122 2123 p->pdeath_signal = 0; 2124 INIT_LIST_HEAD(&p->thread_group); 2125 p->task_works = NULL; 2126 2127 cgroup_threadgroup_change_begin(current); 2128 /* 2129 * Ensure that the cgroup subsystem policies allow the new process to be 2130 * forked. It should be noted the the new process's css_set can be changed 2131 * between here and cgroup_post_fork() if an organisation operation is in 2132 * progress. 2133 */ 2134 retval = cgroup_can_fork(p); 2135 if (retval) 2136 goto bad_fork_cgroup_threadgroup_change_end; 2137 2138 /* 2139 * From this point on we must avoid any synchronous user-space 2140 * communication until we take the tasklist-lock. In particular, we do 2141 * not want user-space to be able to predict the process start-time by 2142 * stalling fork(2) after we recorded the start_time but before it is 2143 * visible to the system. 2144 */ 2145 2146 p->start_time = ktime_get_ns(); 2147 p->real_start_time = ktime_get_boottime_ns(); 2148 2149 /* 2150 * Make it visible to the rest of the system, but dont wake it up yet. 2151 * Need tasklist lock for parent etc handling! 2152 */ 2153 write_lock_irq(&tasklist_lock); 2154 2155 /* CLONE_PARENT re-uses the old parent */ 2156 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { 2157 p->real_parent = current->real_parent; 2158 p->parent_exec_id = current->parent_exec_id; 2159 } else { 2160 p->real_parent = current; 2161 p->parent_exec_id = current->self_exec_id; 2162 } 2163 2164 klp_copy_process(p); 2165 2166 spin_lock(¤t->sighand->siglock); 2167 2168 /* 2169 * Copy seccomp details explicitly here, in case they were changed 2170 * before holding sighand lock. 2171 */ 2172 copy_seccomp(p); 2173 2174 rseq_fork(p, clone_flags); 2175 2176 /* Don't start children in a dying pid namespace */ 2177 if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { 2178 retval = -ENOMEM; 2179 goto bad_fork_cancel_cgroup; 2180 } 2181 2182 /* Let kill terminate clone/fork in the middle */ 2183 if (fatal_signal_pending(current)) { 2184 retval = -EINTR; 2185 goto bad_fork_cancel_cgroup; 2186 } 2187 2188 /* past the last point of failure */ 2189 if (pidfile) 2190 fd_install(pidfd, pidfile); 2191 2192 init_task_pid_links(p); 2193 if (likely(p->pid)) { 2194 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); 2195 2196 init_task_pid(p, PIDTYPE_PID, pid); 2197 if (thread_group_leader(p)) { 2198 init_task_pid(p, PIDTYPE_TGID, pid); 2199 init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); 2200 init_task_pid(p, PIDTYPE_SID, task_session(current)); 2201 2202 if (is_child_reaper(pid)) { 2203 ns_of_pid(pid)->child_reaper = p; 2204 p->signal->flags |= SIGNAL_UNKILLABLE; 2205 } 2206 p->signal->shared_pending.signal = delayed.signal; 2207 p->signal->tty = tty_kref_get(current->signal->tty); 2208 /* 2209 * Inherit has_child_subreaper flag under the same 2210 * tasklist_lock with adding child to the process tree 2211 * for propagate_has_child_subreaper optimization. 2212 */ 2213 p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || 2214 p->real_parent->signal->is_child_subreaper; 2215 list_add_tail(&p->sibling, &p->real_parent->children); 2216 list_add_tail_rcu(&p->tasks, &init_task.tasks); 2217 attach_pid(p, PIDTYPE_TGID); 2218 attach_pid(p, PIDTYPE_PGID); 2219 attach_pid(p, PIDTYPE_SID); 2220 __this_cpu_inc(process_counts); 2221 } else { 2222 current->signal->nr_threads++; 2223 atomic_inc(¤t->signal->live); 2224 refcount_inc(¤t->signal->sigcnt); 2225 task_join_group_stop(p); 2226 list_add_tail_rcu(&p->thread_group, 2227 &p->group_leader->thread_group); 2228 list_add_tail_rcu(&p->thread_node, 2229 &p->signal->thread_head); 2230 } 2231 attach_pid(p, PIDTYPE_PID); 2232 nr_threads++; 2233 } 2234 total_forks++; 2235 hlist_del_init(&delayed.node); 2236 spin_unlock(¤t->sighand->siglock); 2237 syscall_tracepoint_update(p); 2238 write_unlock_irq(&tasklist_lock); 2239 2240 proc_fork_connector(p); 2241 cgroup_post_fork(p); 2242 cgroup_threadgroup_change_end(current); 2243 perf_event_fork(p); 2244 2245 trace_task_newtask(p, clone_flags); 2246 uprobe_copy_process(p, clone_flags); 2247 2248 return p; 2249 2250 bad_fork_cancel_cgroup: 2251 spin_unlock(¤t->sighand->siglock); 2252 write_unlock_irq(&tasklist_lock); 2253 cgroup_cancel_fork(p); 2254 bad_fork_cgroup_threadgroup_change_end: 2255 cgroup_threadgroup_change_end(current); 2256 bad_fork_put_pidfd: 2257 if (clone_flags & CLONE_PIDFD) { 2258 fput(pidfile); 2259 put_unused_fd(pidfd); 2260 } 2261 bad_fork_free_pid: 2262 if (pid != &init_struct_pid) 2263 free_pid(pid); 2264 bad_fork_cleanup_thread: 2265 exit_thread(p); 2266 bad_fork_cleanup_io: 2267 if (p->io_context) 2268 exit_io_context(p); 2269 bad_fork_cleanup_namespaces: 2270 exit_task_namespaces(p); 2271 bad_fork_cleanup_mm: 2272 if (p->mm) { 2273 mm_clear_owner(p->mm, p); 2274 mmput(p->mm); 2275 } 2276 bad_fork_cleanup_signal: 2277 if (!(clone_flags & CLONE_THREAD)) 2278 free_signal_struct(p->signal); 2279 bad_fork_cleanup_sighand: 2280 __cleanup_sighand(p->sighand); 2281 bad_fork_cleanup_fs: 2282 exit_fs(p); /* blocking */ 2283 bad_fork_cleanup_files: 2284 exit_files(p); /* blocking */ 2285 bad_fork_cleanup_semundo: 2286 exit_sem(p); 2287 bad_fork_cleanup_security: 2288 security_task_free(p); 2289 bad_fork_cleanup_audit: 2290 audit_free(p); 2291 bad_fork_cleanup_perf: 2292 perf_event_free_task(p); 2293 bad_fork_cleanup_policy: 2294 lockdep_free_task(p); 2295 #ifdef CONFIG_NUMA 2296 mpol_put(p->mempolicy); 2297 bad_fork_cleanup_threadgroup_lock: 2298 #endif 2299 delayacct_tsk_free(p); 2300 bad_fork_cleanup_count: 2301 atomic_dec(&p->cred->user->processes); 2302 exit_creds(p); 2303 bad_fork_free: 2304 p->state = TASK_DEAD; 2305 put_task_stack(p); 2306 delayed_free_task(p); 2307 fork_out: 2308 spin_lock_irq(¤t->sighand->siglock); 2309 hlist_del_init(&delayed.node); 2310 spin_unlock_irq(¤t->sighand->siglock); 2311 return ERR_PTR(retval); 2312 } 2313 2314 static inline void init_idle_pids(struct task_struct *idle) 2315 { 2316 enum pid_type type; 2317 2318 for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { 2319 INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */ 2320 init_task_pid(idle, type, &init_struct_pid); 2321 } 2322 } 2323 2324 struct task_struct *fork_idle(int cpu) 2325 { 2326 struct task_struct *task; 2327 struct kernel_clone_args args = { 2328 .flags = CLONE_VM, 2329 }; 2330 2331 task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args); 2332 if (!IS_ERR(task)) { 2333 init_idle_pids(task); 2334 init_idle(task, cpu); 2335 } 2336 2337 return task; 2338 } 2339 2340 struct mm_struct *copy_init_mm(void) 2341 { 2342 return dup_mm(NULL, &init_mm); 2343 } 2344 2345 /* 2346 * Ok, this is the main fork-routine. 2347 * 2348 * It copies the process, and if successful kick-starts 2349 * it and waits for it to finish using the VM if required. 2350 * 2351 * args->exit_signal is expected to be checked for sanity by the caller. 2352 */ 2353 long _do_fork(struct kernel_clone_args *args) 2354 { 2355 u64 clone_flags = args->flags; 2356 struct completion vfork; 2357 struct pid *pid; 2358 struct task_struct *p; 2359 int trace = 0; 2360 long nr; 2361 2362 /* 2363 * Determine whether and which event to report to ptracer. When 2364 * called from kernel_thread or CLONE_UNTRACED is explicitly 2365 * requested, no event is reported; otherwise, report if the event 2366 * for the type of forking is enabled. 2367 */ 2368 if (!(clone_flags & CLONE_UNTRACED)) { 2369 if (clone_flags & CLONE_VFORK) 2370 trace = PTRACE_EVENT_VFORK; 2371 else if (args->exit_signal != SIGCHLD) 2372 trace = PTRACE_EVENT_CLONE; 2373 else 2374 trace = PTRACE_EVENT_FORK; 2375 2376 if (likely(!ptrace_event_enabled(current, trace))) 2377 trace = 0; 2378 } 2379 2380 p = copy_process(NULL, trace, NUMA_NO_NODE, args); 2381 add_latent_entropy(); 2382 2383 if (IS_ERR(p)) 2384 return PTR_ERR(p); 2385 2386 /* 2387 * Do this prior waking up the new thread - the thread pointer 2388 * might get invalid after that point, if the thread exits quickly. 2389 */ 2390 trace_sched_process_fork(current, p); 2391 2392 pid = get_task_pid(p, PIDTYPE_PID); 2393 nr = pid_vnr(pid); 2394 2395 if (clone_flags & CLONE_PARENT_SETTID) 2396 put_user(nr, args->parent_tid); 2397 2398 if (clone_flags & CLONE_VFORK) { 2399 p->vfork_done = &vfork; 2400 init_completion(&vfork); 2401 get_task_struct(p); 2402 } 2403 2404 wake_up_new_task(p); 2405 2406 /* forking complete and child started to run, tell ptracer */ 2407 if (unlikely(trace)) 2408 ptrace_event_pid(trace, pid); 2409 2410 if (clone_flags & CLONE_VFORK) { 2411 if (!wait_for_vfork_done(p, &vfork)) 2412 ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); 2413 } 2414 2415 put_pid(pid); 2416 return nr; 2417 } 2418 2419 bool legacy_clone_args_valid(const struct kernel_clone_args *kargs) 2420 { 2421 /* clone(CLONE_PIDFD) uses parent_tidptr to return a pidfd */ 2422 if ((kargs->flags & CLONE_PIDFD) && 2423 (kargs->flags & CLONE_PARENT_SETTID)) 2424 return false; 2425 2426 return true; 2427 } 2428 2429 #ifndef CONFIG_HAVE_COPY_THREAD_TLS 2430 /* For compatibility with architectures that call do_fork directly rather than 2431 * using the syscall entry points below. */ 2432 long do_fork(unsigned long clone_flags, 2433 unsigned long stack_start, 2434 unsigned long stack_size, 2435 int __user *parent_tidptr, 2436 int __user *child_tidptr) 2437 { 2438 struct kernel_clone_args args = { 2439 .flags = (clone_flags & ~CSIGNAL), 2440 .pidfd = parent_tidptr, 2441 .child_tid = child_tidptr, 2442 .parent_tid = parent_tidptr, 2443 .exit_signal = (clone_flags & CSIGNAL), 2444 .stack = stack_start, 2445 .stack_size = stack_size, 2446 }; 2447 2448 if (!legacy_clone_args_valid(&args)) 2449 return -EINVAL; 2450 2451 return _do_fork(&args); 2452 } 2453 #endif 2454 2455 /* 2456 * Create a kernel thread. 2457 */ 2458 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 2459 { 2460 struct kernel_clone_args args = { 2461 .flags = ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL), 2462 .exit_signal = (flags & CSIGNAL), 2463 .stack = (unsigned long)fn, 2464 .stack_size = (unsigned long)arg, 2465 }; 2466 2467 return _do_fork(&args); 2468 } 2469 2470 #ifdef __ARCH_WANT_SYS_FORK 2471 SYSCALL_DEFINE0(fork) 2472 { 2473 #ifdef CONFIG_MMU 2474 struct kernel_clone_args args = { 2475 .exit_signal = SIGCHLD, 2476 }; 2477 2478 return _do_fork(&args); 2479 #else 2480 /* can not support in nommu mode */ 2481 return -EINVAL; 2482 #endif 2483 } 2484 #endif 2485 2486 #ifdef __ARCH_WANT_SYS_VFORK 2487 SYSCALL_DEFINE0(vfork) 2488 { 2489 struct kernel_clone_args args = { 2490 .flags = CLONE_VFORK | CLONE_VM, 2491 .exit_signal = SIGCHLD, 2492 }; 2493 2494 return _do_fork(&args); 2495 } 2496 #endif 2497 2498 #ifdef __ARCH_WANT_SYS_CLONE 2499 #ifdef CONFIG_CLONE_BACKWARDS 2500 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2501 int __user *, parent_tidptr, 2502 unsigned long, tls, 2503 int __user *, child_tidptr) 2504 #elif defined(CONFIG_CLONE_BACKWARDS2) 2505 SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, 2506 int __user *, parent_tidptr, 2507 int __user *, child_tidptr, 2508 unsigned long, tls) 2509 #elif defined(CONFIG_CLONE_BACKWARDS3) 2510 SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, 2511 int, stack_size, 2512 int __user *, parent_tidptr, 2513 int __user *, child_tidptr, 2514 unsigned long, tls) 2515 #else 2516 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2517 int __user *, parent_tidptr, 2518 int __user *, child_tidptr, 2519 unsigned long, tls) 2520 #endif 2521 { 2522 struct kernel_clone_args args = { 2523 .flags = (clone_flags & ~CSIGNAL), 2524 .pidfd = parent_tidptr, 2525 .child_tid = child_tidptr, 2526 .parent_tid = parent_tidptr, 2527 .exit_signal = (clone_flags & CSIGNAL), 2528 .stack = newsp, 2529 .tls = tls, 2530 }; 2531 2532 if (!legacy_clone_args_valid(&args)) 2533 return -EINVAL; 2534 2535 return _do_fork(&args); 2536 } 2537 #endif 2538 2539 #ifdef __ARCH_WANT_SYS_CLONE3 2540 noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, 2541 struct clone_args __user *uargs, 2542 size_t size) 2543 { 2544 struct clone_args args; 2545 2546 if (unlikely(size > PAGE_SIZE)) 2547 return -E2BIG; 2548 2549 if (unlikely(size < sizeof(struct clone_args))) 2550 return -EINVAL; 2551 2552 if (unlikely(!access_ok(uargs, size))) 2553 return -EFAULT; 2554 2555 if (size > sizeof(struct clone_args)) { 2556 unsigned char __user *addr; 2557 unsigned char __user *end; 2558 unsigned char val; 2559 2560 addr = (void __user *)uargs + sizeof(struct clone_args); 2561 end = (void __user *)uargs + size; 2562 2563 for (; addr < end; addr++) { 2564 if (get_user(val, addr)) 2565 return -EFAULT; 2566 if (val) 2567 return -E2BIG; 2568 } 2569 2570 size = sizeof(struct clone_args); 2571 } 2572 2573 if (copy_from_user(&args, uargs, size)) 2574 return -EFAULT; 2575 2576 /* 2577 * Verify that higher 32bits of exit_signal are unset and that 2578 * it is a valid signal 2579 */ 2580 if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) || 2581 !valid_signal(args.exit_signal))) 2582 return -EINVAL; 2583 2584 *kargs = (struct kernel_clone_args){ 2585 .flags = args.flags, 2586 .pidfd = u64_to_user_ptr(args.pidfd), 2587 .child_tid = u64_to_user_ptr(args.child_tid), 2588 .parent_tid = u64_to_user_ptr(args.parent_tid), 2589 .exit_signal = args.exit_signal, 2590 .stack = args.stack, 2591 .stack_size = args.stack_size, 2592 .tls = args.tls, 2593 }; 2594 2595 return 0; 2596 } 2597 2598 static bool clone3_args_valid(const struct kernel_clone_args *kargs) 2599 { 2600 /* 2601 * All lower bits of the flag word are taken. 2602 * Verify that no other unknown flags are passed along. 2603 */ 2604 if (kargs->flags & ~CLONE_LEGACY_FLAGS) 2605 return false; 2606 2607 /* 2608 * - make the CLONE_DETACHED bit reuseable for clone3 2609 * - make the CSIGNAL bits reuseable for clone3 2610 */ 2611 if (kargs->flags & (CLONE_DETACHED | CSIGNAL)) 2612 return false; 2613 2614 if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) && 2615 kargs->exit_signal) 2616 return false; 2617 2618 return true; 2619 } 2620 2621 SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size) 2622 { 2623 int err; 2624 2625 struct kernel_clone_args kargs; 2626 2627 err = copy_clone_args_from_user(&kargs, uargs, size); 2628 if (err) 2629 return err; 2630 2631 if (!clone3_args_valid(&kargs)) 2632 return -EINVAL; 2633 2634 return _do_fork(&kargs); 2635 } 2636 #endif 2637 2638 void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) 2639 { 2640 struct task_struct *leader, *parent, *child; 2641 int res; 2642 2643 read_lock(&tasklist_lock); 2644 leader = top = top->group_leader; 2645 down: 2646 for_each_thread(leader, parent) { 2647 list_for_each_entry(child, &parent->children, sibling) { 2648 res = visitor(child, data); 2649 if (res) { 2650 if (res < 0) 2651 goto out; 2652 leader = child; 2653 goto down; 2654 } 2655 up: 2656 ; 2657 } 2658 } 2659 2660 if (leader != top) { 2661 child = leader; 2662 parent = child->real_parent; 2663 leader = parent->group_leader; 2664 goto up; 2665 } 2666 out: 2667 read_unlock(&tasklist_lock); 2668 } 2669 2670 #ifndef ARCH_MIN_MMSTRUCT_ALIGN 2671 #define ARCH_MIN_MMSTRUCT_ALIGN 0 2672 #endif 2673 2674 static void sighand_ctor(void *data) 2675 { 2676 struct sighand_struct *sighand = data; 2677 2678 spin_lock_init(&sighand->siglock); 2679 init_waitqueue_head(&sighand->signalfd_wqh); 2680 } 2681 2682 void __init proc_caches_init(void) 2683 { 2684 unsigned int mm_size; 2685 2686 sighand_cachep = kmem_cache_create("sighand_cache", 2687 sizeof(struct sighand_struct), 0, 2688 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| 2689 SLAB_ACCOUNT, sighand_ctor); 2690 signal_cachep = kmem_cache_create("signal_cache", 2691 sizeof(struct signal_struct), 0, 2692 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 2693 NULL); 2694 files_cachep = kmem_cache_create("files_cache", 2695 sizeof(struct files_struct), 0, 2696 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 2697 NULL); 2698 fs_cachep = kmem_cache_create("fs_cache", 2699 sizeof(struct fs_struct), 0, 2700 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 2701 NULL); 2702 2703 /* 2704 * The mm_cpumask is located at the end of mm_struct, and is 2705 * dynamically sized based on the maximum CPU number this system 2706 * can have, taking hotplug into account (nr_cpu_ids). 2707 */ 2708 mm_size = sizeof(struct mm_struct) + cpumask_size(); 2709 2710 mm_cachep = kmem_cache_create_usercopy("mm_struct", 2711 mm_size, ARCH_MIN_MMSTRUCT_ALIGN, 2712 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 2713 offsetof(struct mm_struct, saved_auxv), 2714 sizeof_field(struct mm_struct, saved_auxv), 2715 NULL); 2716 vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); 2717 mmap_init(); 2718 nsproxy_cache_init(); 2719 } 2720 2721 /* 2722 * Check constraints on flags passed to the unshare system call. 2723 */ 2724 static int check_unshare_flags(unsigned long unshare_flags) 2725 { 2726 if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| 2727 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| 2728 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| 2729 CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP)) 2730 return -EINVAL; 2731 /* 2732 * Not implemented, but pretend it works if there is nothing 2733 * to unshare. Note that unsharing the address space or the 2734 * signal handlers also need to unshare the signal queues (aka 2735 * CLONE_THREAD). 2736 */ 2737 if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { 2738 if (!thread_group_empty(current)) 2739 return -EINVAL; 2740 } 2741 if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { 2742 if (refcount_read(¤t->sighand->count) > 1) 2743 return -EINVAL; 2744 } 2745 if (unshare_flags & CLONE_VM) { 2746 if (!current_is_single_threaded()) 2747 return -EINVAL; 2748 } 2749 2750 return 0; 2751 } 2752 2753 /* 2754 * Unshare the filesystem structure if it is being shared 2755 */ 2756 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) 2757 { 2758 struct fs_struct *fs = current->fs; 2759 2760 if (!(unshare_flags & CLONE_FS) || !fs) 2761 return 0; 2762 2763 /* don't need lock here; in the worst case we'll do useless copy */ 2764 if (fs->users == 1) 2765 return 0; 2766 2767 *new_fsp = copy_fs_struct(fs); 2768 if (!*new_fsp) 2769 return -ENOMEM; 2770 2771 return 0; 2772 } 2773 2774 /* 2775 * Unshare file descriptor table if it is being shared 2776 */ 2777 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) 2778 { 2779 struct files_struct *fd = current->files; 2780 int error = 0; 2781 2782 if ((unshare_flags & CLONE_FILES) && 2783 (fd && atomic_read(&fd->count) > 1)) { 2784 *new_fdp = dup_fd(fd, &error); 2785 if (!*new_fdp) 2786 return error; 2787 } 2788 2789 return 0; 2790 } 2791 2792 /* 2793 * unshare allows a process to 'unshare' part of the process 2794 * context which was originally shared using clone. copy_* 2795 * functions used by do_fork() cannot be used here directly 2796 * because they modify an inactive task_struct that is being 2797 * constructed. Here we are modifying the current, active, 2798 * task_struct. 2799 */ 2800 int ksys_unshare(unsigned long unshare_flags) 2801 { 2802 struct fs_struct *fs, *new_fs = NULL; 2803 struct files_struct *fd, *new_fd = NULL; 2804 struct cred *new_cred = NULL; 2805 struct nsproxy *new_nsproxy = NULL; 2806 int do_sysvsem = 0; 2807 int err; 2808 2809 /* 2810 * If unsharing a user namespace must also unshare the thread group 2811 * and unshare the filesystem root and working directories. 2812 */ 2813 if (unshare_flags & CLONE_NEWUSER) 2814 unshare_flags |= CLONE_THREAD | CLONE_FS; 2815 /* 2816 * If unsharing vm, must also unshare signal handlers. 2817 */ 2818 if (unshare_flags & CLONE_VM) 2819 unshare_flags |= CLONE_SIGHAND; 2820 /* 2821 * If unsharing a signal handlers, must also unshare the signal queues. 2822 */ 2823 if (unshare_flags & CLONE_SIGHAND) 2824 unshare_flags |= CLONE_THREAD; 2825 /* 2826 * If unsharing namespace, must also unshare filesystem information. 2827 */ 2828 if (unshare_flags & CLONE_NEWNS) 2829 unshare_flags |= CLONE_FS; 2830 2831 err = check_unshare_flags(unshare_flags); 2832 if (err) 2833 goto bad_unshare_out; 2834 /* 2835 * CLONE_NEWIPC must also detach from the undolist: after switching 2836 * to a new ipc namespace, the semaphore arrays from the old 2837 * namespace are unreachable. 2838 */ 2839 if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) 2840 do_sysvsem = 1; 2841 err = unshare_fs(unshare_flags, &new_fs); 2842 if (err) 2843 goto bad_unshare_out; 2844 err = unshare_fd(unshare_flags, &new_fd); 2845 if (err) 2846 goto bad_unshare_cleanup_fs; 2847 err = unshare_userns(unshare_flags, &new_cred); 2848 if (err) 2849 goto bad_unshare_cleanup_fd; 2850 err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, 2851 new_cred, new_fs); 2852 if (err) 2853 goto bad_unshare_cleanup_cred; 2854 2855 if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { 2856 if (do_sysvsem) { 2857 /* 2858 * CLONE_SYSVSEM is equivalent to sys_exit(). 2859 */ 2860 exit_sem(current); 2861 } 2862 if (unshare_flags & CLONE_NEWIPC) { 2863 /* Orphan segments in old ns (see sem above). */ 2864 exit_shm(current); 2865 shm_init_task(current); 2866 } 2867 2868 if (new_nsproxy) 2869 switch_task_namespaces(current, new_nsproxy); 2870 2871 task_lock(current); 2872 2873 if (new_fs) { 2874 fs = current->fs; 2875 spin_lock(&fs->lock); 2876 current->fs = new_fs; 2877 if (--fs->users) 2878 new_fs = NULL; 2879 else 2880 new_fs = fs; 2881 spin_unlock(&fs->lock); 2882 } 2883 2884 if (new_fd) { 2885 fd = current->files; 2886 current->files = new_fd; 2887 new_fd = fd; 2888 } 2889 2890 task_unlock(current); 2891 2892 if (new_cred) { 2893 /* Install the new user namespace */ 2894 commit_creds(new_cred); 2895 new_cred = NULL; 2896 } 2897 } 2898 2899 perf_event_namespaces(current); 2900 2901 bad_unshare_cleanup_cred: 2902 if (new_cred) 2903 put_cred(new_cred); 2904 bad_unshare_cleanup_fd: 2905 if (new_fd) 2906 put_files_struct(new_fd); 2907 2908 bad_unshare_cleanup_fs: 2909 if (new_fs) 2910 free_fs_struct(new_fs); 2911 2912 bad_unshare_out: 2913 return err; 2914 } 2915 2916 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) 2917 { 2918 return ksys_unshare(unshare_flags); 2919 } 2920 2921 /* 2922 * Helper to unshare the files of the current task. 2923 * We don't want to expose copy_files internals to 2924 * the exec layer of the kernel. 2925 */ 2926 2927 int unshare_files(struct files_struct **displaced) 2928 { 2929 struct task_struct *task = current; 2930 struct files_struct *copy = NULL; 2931 int error; 2932 2933 error = unshare_fd(CLONE_FILES, ©); 2934 if (error || !copy) { 2935 *displaced = NULL; 2936 return error; 2937 } 2938 *displaced = task->files; 2939 task_lock(task); 2940 task->files = copy; 2941 task_unlock(task); 2942 return 0; 2943 } 2944 2945 int sysctl_max_threads(struct ctl_table *table, int write, 2946 void __user *buffer, size_t *lenp, loff_t *ppos) 2947 { 2948 struct ctl_table t; 2949 int ret; 2950 int threads = max_threads; 2951 int min = MIN_THREADS; 2952 int max = MAX_THREADS; 2953 2954 t = *table; 2955 t.data = &threads; 2956 t.extra1 = &min; 2957 t.extra2 = &max; 2958 2959 ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2960 if (ret || !write) 2961 return ret; 2962 2963 set_max_threads(threads); 2964 2965 return 0; 2966 } 2967