// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/kmsan.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/tty.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>
#include <linux/stackprotector.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

static int max_threads;		/* tunable limit on nr_threads */

#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#  ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush. Try to minimize the number of calls by caching stacks.
 */
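/*
 * Sketch of the caching protocol used below (illustrative only, not an
 * additional mechanism): a free per-CPU cache slot is claimed with a
 * single this_cpu_cmpxchg(), which both tests for NULL and installs the
 * stack in one atomic per-CPU operation, so the cache needs no locking:
 *
 *	if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) == NULL)
 *		return true;	// slot was empty and now holds vm
 *	// otherwise the slot was occupied: try the next one
 */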
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

struct vm_stack {
	struct rcu_head rcu;
	struct vm_struct *stack_vm_area;
};

static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
{
	unsigned int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
			continue;
		return true;
	}
	return false;
}

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);

	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
		return;

	vfree(vm_stack);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct vm_stack *vm_stack = tsk->stack;

	vm_stack->stack_vm_area = tsk->stack_vm_area;
	call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
}

static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}

static int memcg_charge_kernel_stack(struct vm_struct *vm)
{
	int i;
	int ret;

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
		if (ret)
			goto err;
	}
	return 0;
err:
	/*
	 * If memcg_kmem_charge_page() fails, page's memory cgroup pointer is
	 * NULL, and memcg_kmem_uncharge_page() in free_thread_stack() will
	 * ignore this page.
	 */
	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
		memcg_kmem_uncharge_page(vm->pages[i], 0);
	return ret;
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct vm_struct *vm;
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;

		/* Reset stack metadata. */
		kasan_unpoison_range(s->addr, THREAD_SIZE);

		stack = kasan_reset_tag(s->addr);

		/* Clear stale pointers from reused stack. */
		memset(stack, 0, THREAD_SIZE);

		if (memcg_charge_kernel_stack(s)) {
			vfree(s->addr);
			return -ENOMEM;
		}

		tsk->stack_vm_area = s;
		tsk->stack = stack;
		return 0;
	}

	/*
	 * Allocated stacks are cached and later reused by new threads,
	 * so memcg accounting is performed manually on assigning/releasing
	 * stacks to tasks. Drop __GFP_ACCOUNT.
	 */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));
	if (!stack)
		return -ENOMEM;

	vm = find_vm_area(stack);
	if (memcg_charge_kernel_stack(vm)) {
		vfree(stack);
		return -ENOMEM;
	}
	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	tsk->stack_vm_area = vm;
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return 0;
}
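/*
 * Lifecycle sketch for the vmapped-stack path (illustrative; the 16 KiB
 * THREAD_SIZE used below is an assumption, the value is arch-dependent):
 *
 *	alloc_thread_stack_node()
 *	    cache hit:  this_cpu_xchg() pops a parked stack, then memcg charge
 *	    cache miss: __vmalloc_node_range() without __GFP_ACCOUNT, then
 *	                memcg_charge_kernel_stack() charges all
 *	                THREAD_SIZE / PAGE_SIZE == 4 pages individually
 *	free_thread_stack()
 *	    fast path:  try_release_thread_stack_to_cache() parks the stack
 *	    slow path:  thread_stack_delayed_free() vfree()s it after an
 *	                RCU grace period
 */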
static void free_thread_stack(struct task_struct *tsk)
{
	if (!try_release_thread_stack_to_cache(tsk->stack_vm_area))
		thread_stack_delayed_free(tsk);

	tsk->stack = NULL;
	tsk->stack_vm_area = NULL;
}

#  else /* !CONFIG_VMAP_STACK */

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	__free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		tsk->stack = kasan_reset_tag(page_address(page));
		return 0;
	}
	return -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

#  endif /* CONFIG_VMAP_STACK */
# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */

static struct kmem_cache *thread_stack_cache;

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	kmem_cache_free(thread_stack_cache, rh);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;
	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
						THREAD_SIZE, THREAD_SIZE, 0, 0,
						THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}

# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;

	stack = arch_alloc_thread_stack_node(tsk, node);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}
static void free_thread_stack(struct task_struct *tsk)
{
	arch_free_thread_stack(tsk);
	tsk->stack = NULL;
}

#endif /* !CONFIG_ARCH_THREAD_STACK_ALLOCATOR */

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma)
		vma_init(vma, mm);
	return vma;
}

struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (new) {
		ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
		ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
		/*
		 * orig->shared.rb may be modified concurrently, but the clone
		 * will be reinitialized.
		 */
		*new = data_race(*orig);
		INIT_LIST_HEAD(&new->anon_vma_chain);
		dup_anon_vma_name(orig, new);
	}
	return new;
}

void vm_area_free(struct vm_area_struct *vma)
{
	free_anon_vma_name(vma);
	kmem_cache_free(vm_area_cachep, vma);
}

static void account_kernel_stack(struct task_struct *tsk, int account)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm = task_stack_vm_area(tsk);
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
					      account * (PAGE_SIZE / 1024));
	} else {
		void *stack = task_stack_page(tsk);

		/* All stack pages are in the same node. */
		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	}
}

void exit_task_stack_account(struct task_struct *tsk)
{
	account_kernel_stack(tsk, -1);

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm;
		int i;

		vm = task_stack_vm_area(tsk);
		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			memcg_kmem_uncharge_page(vm->pages[i], 0);
	}
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	free_thread_stack(tsk);
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif
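/*
 * Usage sketch (illustrative): a reader that wants to walk another task's
 * stack pins it against release_task_stack() via the stack refcount; the
 * canonical pairing, using the helpers from <linux/sched/task_stack.h>, is:
 *
 *	if (try_get_task_stack(tsk)) {
 *		// inspect or dump tsk's stack
 *		put_task_stack(tsk);
 *	}
 */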
void free_task(struct task_struct *tsk)
{
#ifdef CONFIG_SECCOMP
	WARN_ON_ONCE(tsk->seccomp.filter);
#endif
	release_user_cpus_ptr(tsk);
	scs_release(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct file *exe_file;

	exe_file = get_mm_exe_file(oldmm);
	RCU_INIT_POINTER(mm->exe_file, exe_file);
	/*
	 * We depend on the oldmm having properly denied write access to the
	 * exe_file already.
	 */
	if (exe_file && deny_write_access(exe_file))
		pr_warn_once("deny_write_access() failed in %s\n", __func__);
}

#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp;
	int retval;
	unsigned long charge = 0;
	LIST_HEAD(uf);
	MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
	MA_STATE(mas, &mm->mm_mt, 0, 0);

	uprobe_start_dup_mmap();
	if (mmap_write_lock_killable(oldmm)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	dup_mm_exe_file(mm, oldmm);

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	khugepaged_fork(mm, oldmm);

	retval = mas_expected_entries(&mas, oldmm->map_count);
	if (retval)
		goto out;

	mas_for_each(&old_mas, mpnt, ULONG_MAX) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto loop_out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
		file = tmp->vm_file;
		if (file) {
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
							&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Copy/update hugetlb private vma information.
		 */
		if (is_vm_hugetlb_page(tmp))
			hugetlb_dup_vma_private(tmp);

		/* Link the vma into the MT */
		mas.index = tmp->vm_start;
		mas.last = tmp->vm_end - 1;
		mas_store(&mas, tmp);
		if (mas_is_err(&mas))
			goto fail_nomem_mas_store;

		mm->map_count++;
		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto loop_out;
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
loop_out:
	mas_destroy(&mas);
out:
	mmap_write_unlock(mm);
	flush_tlb_mm(oldmm);
	mmap_write_unlock(oldmm);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;

fail_nomem_mas_store:
	unlink_anon_vmas(tmp);
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto loop_out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mmap_write_lock(oldmm);
	dup_mm_exe_file(mm, oldmm);
	mmap_write_unlock(oldmm);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

static void check_mm(struct mm_struct *mm)
{
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
			 "Please make sure 'struct resident_page_types[]' is updated as well");

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = percpu_counter_sum(&mm->rss_stat[i]);

		if (likely(!x))
			continue;

		/* Making sure this is not due to race with CPU offlining. */
		x = percpu_counter_sum_all(&mm->rss_stat[i]);
		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
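/*
 * Reference-counting sketch (illustrative): mm_count pins the mm_struct
 * itself (mmgrab()/mmdrop(), e.g. for lazy-TLB or oom_mm references),
 * while mm_users pins the address space contents (mmget()/mmput()).
 * When mm_users drops to zero, __mmput() tears down the mappings; when
 * mm_count drops to zero, __mmdrop() below frees the page directory and
 * the struct:
 *
 *	mmgrab(mm);	// mm_struct may outlive its address space
 *	...
 *	mmdrop(mm);	// last reference ends up in __mmdrop()
 */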
/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	int i;

	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);
	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_subscriptions_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	mm_pasid_drop(mm);

	for (i = 0; i < NR_MM_COUNTERS; i++)
		percpu_counter_destroy(&mm->rss_stat[i]);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (refcount_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(refcount_read(&tsk->usage));
	WARN_ON(tsk == current);

	io_uring_free(tsk);
	cgroup_free(tsk);
	task_numa_free(tsk, true);
	security_task_free(tsk);
	bpf_task_storage_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);
	sched_core_free(tsk);
	free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;
	unsigned long nr_pages = totalram_pages();

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
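/*
 * Worked example (illustrative numbers): with 4 GiB of RAM, 4 KiB pages
 * and 16 KiB stacks, nr_pages * PAGE_SIZE = 2^32 and THREAD_SIZE * 8 =
 * 2^17, so the computed cap is 2^32 / 2^17 = 32768 threads, i.e. at
 * most one eighth of memory could ever go to kernel stacks.
 */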
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* Fetch thread_struct whitelist for the architecture. */
	arch_thread_struct_whitelist(offset, size);

	/*
	 * Handle zero-sized whitelist or empty thread_struct, otherwise
	 * adjust offset to position of thread_struct in task_struct.
	 */
	if (unlikely(*size == 0))
		*offset = 0;
	else
		*offset += offsetof(struct task_struct, thread);
}
#endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */

void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
	unsigned long useroffset, usersize;

	/* create a slab on which task_structs can be allocated */
	task_struct_whitelist(&useroffset, &usersize);
	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_ACCOUNT,
			useroffset, usersize, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++)
		init_user_ns.ucount_max[i] = max_threads/2;

	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY);

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	scs_init();

	lockdep_init_task(&init_task);
	uprobes_init();
}

int __weak arch_dup_task_struct(struct task_struct *dst,
				struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}

static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto free_tsk;

	err = alloc_thread_stack_node(tsk, node);
	if (err)
		goto free_tsk;

#ifdef CONFIG_THREAD_INFO_IN_TASK
	refcount_set(&tsk->stack_refcount, 1);
#endif
	account_kernel_stack(tsk, 1);

	err = scs_prepare(tsk, node);
	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);
	clear_syscall_work_syscall_user_dispatch(tsk);

#ifdef CONFIG_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif
	if (orig->cpus_ptr == &orig->cpus_mask)
		tsk->cpus_ptr = &tsk->cpus_mask;
	dup_user_cpus_ptr(tsk, orig, node);

	/*
	 * One for the user space visible state that goes away when reaped.
	 * One for the scheduler.
	 */
	refcount_set(&tsk->rcu_users, 2);
	/* One for the rcu users */
	refcount_set(&tsk->usage, 1);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;
	tsk->worker_private = NULL;

	kcov_task_init(tsk);
	kmsan_task_create(tsk);
	kmap_local_fork(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

#ifdef CONFIG_BLK_CGROUP
	tsk->throttle_queue = NULL;
	tsk->use_memdelay = 0;
#endif

#ifdef CONFIG_IOMMU_SVA
	tsk->pasid_activated = 0;
#endif

#ifdef CONFIG_MEMCG
	tsk->active_memcg = NULL;
#endif

#ifdef CONFIG_CPU_SUP_INTEL
	tsk->reported_split_lock = 0;
#endif

	return tsk;

free_stack:
	exit_task_stack_account(tsk);
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static __always_inline void mm_clear_owner(struct mm_struct *mm,
					   struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	if (mm->owner == p)
		WRITE_ONCE(mm->owner, NULL);
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	int i;

	mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
	mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	seqcount_init(&mm->write_protect_seq);
	mmap_init_lock(mm);
	INIT_LIST_HEAD(&mm->mmlist);
	mm_pgtables_bytes_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	atomic64_set(&mm->pinned_vm, 0);
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	spin_lock_init(&mm->arg_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	mm_pasid_init(mm);
	RCU_INIT_POINTER(mm->exe_file, NULL);
	mmu_notifier_subscriptions_init(mm);
	init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif
	mm_init_uprobes_state(mm);
	hugetlb_count_init(mm);

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (percpu_counter_init(&mm->rss_stat[i], 0, GFP_KERNEL_ACCOUNT))
			goto fail_pcpu;

	mm->user_ns = get_user_ns(user_ns);
	lru_gen_init_mm(mm);
	return mm;

fail_pcpu:
	while (i > 0)
		percpu_counter_destroy(&mm->rss_stat[--i]);
fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}

static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_page(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	lru_gen_del_mm(mm);
	mmdrop(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					    async_put_work);

	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
EXPORT_SYMBOL_GPL(mmput_async);
#endif
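/*
 * Usage sketch (illustrative): code that holds only a stable mm pointer
 * (e.g. via mmgrab()) and wants to operate on the address space must
 * first take a user reference, since mm_users may already be zero:
 *
 *	if (mmget_not_zero(mm)) {
 *		// operate on the address space
 *		mmput(mm);
 *	}
 */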
/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve task is single
 * threaded.
 *
 * Can only fail if new_exe_file != NULL.
 */
int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file) {
		/*
		 * We expect the caller (i.e., sys_execve) to have already
		 * denied write access, so this is unlikely to fail.
		 */
		if (unlikely(deny_write_access(new_exe_file)))
			return -EACCES;
		get_file(new_exe_file);
	}
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file) {
		allow_write_access(old_exe_file);
		fput(old_exe_file);
	}
	return 0;
}

/**
 * replace_mm_exe_file - replace a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe),
 * dealing with concurrent invocation and without grabbing the mmap lock in
 * write mode.
 *
 * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE).
 */
int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct vm_area_struct *vma;
	struct file *old_exe_file;
	int ret = 0;

	/* Forbid mm->exe_file change if old file still mapped. */
	old_exe_file = get_mm_exe_file(mm);
	if (old_exe_file) {
		VMA_ITERATOR(vmi, mm, 0);
		mmap_read_lock(mm);
		for_each_vma(vmi, vma) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &old_exe_file->f_path)) {
				ret = -EBUSY;
				break;
			}
		}
		mmap_read_unlock(mm);
		fput(old_exe_file);
		if (ret)
			return ret;
	}

	/* set the new file, lockless */
	ret = deny_write_access(new_exe_file);
	if (ret)
		return -EACCES;
	get_file(new_exe_file);

	old_exe_file = xchg(&mm->exe_file, new_exe_file);
	if (old_exe_file) {
		/*
		 * Don't race with dup_mmap() getting the file and disallowing
		 * write access while someone might open the file writable.
		 */
		mmap_read_lock(mm);
		allow_write_access(old_exe_file);
		fput(old_exe_file);
		mmap_read_unlock(mm);
	}
	return 0;
}

/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}

/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}
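/*
 * Usage sketch (illustrative): both exe_file accessors above return an
 * elevated reference that the caller must drop with fput():
 *
 *	struct file *exe_file = get_task_exe_file(task);
 *
 *	if (exe_file) {
 *		// use exe_file->f_path, exe_file->f_inode, ...
 *		fput(exe_file);
 *	}
 */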
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm. Checks that PF_KTHREAD is not set
 * (that flag means this kernel workthread has transiently adopted a user
 * mm with use_mm, e.g. to do its AIO), and if so returns a reference to
 * the mm, after bumping up the use count. User must release the mm via
 * mmput() after use. Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			mmget(mm);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = down_read_killable(&task->signal->exec_update_lock);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	up_read(&task->signal->exec_update_lock);

	return mm;
}

static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	unsigned int state = TASK_UNINTERRUPTIBLE|TASK_KILLABLE|TASK_FREEZABLE;
	int killed;

	cgroup_enter_frozen();
	killed = wait_for_completion_state(vfork, state);
	cgroup_leave_frozen(false);

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			do_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}

void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	futex_exit_release(tsk);
	mm_release(tsk, mm);
}

void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	futex_exec_release(tsk);
	mm_release(tsk, mm);
}
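/*
 * Illustrative note: the clear_child_tid handling in mm_release() above
 * is what makes a userspace pthread_join()-style wait work. The joiner
 * blocks with FUTEX_WAIT on the TID word that the kernel zeroes and
 * wakes here (userspace sketch, not kernel code):
 *
 *	while ((tid = *ctid) != 0)
 *		syscall(SYS_futex, ctid, FUTEX_WAIT, tid, NULL, NULL, 0);
 */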
1527 * 1528 * Allocates a new mm structure and duplicates the provided @oldmm structure 1529 * content into it. 1530 * 1531 * Return: the duplicated mm or NULL on failure. 1532 */ 1533 static struct mm_struct *dup_mm(struct task_struct *tsk, 1534 struct mm_struct *oldmm) 1535 { 1536 struct mm_struct *mm; 1537 int err; 1538 1539 mm = allocate_mm(); 1540 if (!mm) 1541 goto fail_nomem; 1542 1543 memcpy(mm, oldmm, sizeof(*mm)); 1544 1545 if (!mm_init(mm, tsk, mm->user_ns)) 1546 goto fail_nomem; 1547 1548 err = dup_mmap(mm, oldmm); 1549 if (err) 1550 goto free_pt; 1551 1552 mm->hiwater_rss = get_mm_rss(mm); 1553 mm->hiwater_vm = mm->total_vm; 1554 1555 if (mm->binfmt && !try_module_get(mm->binfmt->module)) 1556 goto free_pt; 1557 1558 return mm; 1559 1560 free_pt: 1561 /* don't put binfmt in mmput, we haven't got module yet */ 1562 mm->binfmt = NULL; 1563 mm_init_owner(mm, NULL); 1564 mmput(mm); 1565 1566 fail_nomem: 1567 return NULL; 1568 } 1569 1570 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) 1571 { 1572 struct mm_struct *mm, *oldmm; 1573 1574 tsk->min_flt = tsk->maj_flt = 0; 1575 tsk->nvcsw = tsk->nivcsw = 0; 1576 #ifdef CONFIG_DETECT_HUNG_TASK 1577 tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; 1578 tsk->last_switch_time = 0; 1579 #endif 1580 1581 tsk->mm = NULL; 1582 tsk->active_mm = NULL; 1583 1584 /* 1585 * Are we cloning a kernel thread? 1586 * 1587 * We need to steal a active VM for that.. 1588 */ 1589 oldmm = current->mm; 1590 if (!oldmm) 1591 return 0; 1592 1593 if (clone_flags & CLONE_VM) { 1594 mmget(oldmm); 1595 mm = oldmm; 1596 } else { 1597 mm = dup_mm(tsk, current->mm); 1598 if (!mm) 1599 return -ENOMEM; 1600 } 1601 1602 tsk->mm = mm; 1603 tsk->active_mm = mm; 1604 return 0; 1605 } 1606 1607 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) 1608 { 1609 struct fs_struct *fs = current->fs; 1610 if (clone_flags & CLONE_FS) { 1611 /* tsk->fs is already what we want */ 1612 spin_lock(&fs->lock); 1613 if (fs->in_exec) { 1614 spin_unlock(&fs->lock); 1615 return -EAGAIN; 1616 } 1617 fs->users++; 1618 spin_unlock(&fs->lock); 1619 return 0; 1620 } 1621 tsk->fs = copy_fs_struct(fs); 1622 if (!tsk->fs) 1623 return -ENOMEM; 1624 return 0; 1625 } 1626 1627 static int copy_files(unsigned long clone_flags, struct task_struct *tsk) 1628 { 1629 struct files_struct *oldf, *newf; 1630 int error = 0; 1631 1632 /* 1633 * A background process may not have any files ... 1634 */ 1635 oldf = current->files; 1636 if (!oldf) 1637 goto out; 1638 1639 if (clone_flags & CLONE_FILES) { 1640 atomic_inc(&oldf->count); 1641 goto out; 1642 } 1643 1644 newf = dup_fd(oldf, NR_OPEN_MAX, &error); 1645 if (!newf) 1646 goto out; 1647 1648 tsk->files = newf; 1649 error = 0; 1650 out: 1651 return error; 1652 } 1653 1654 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) 1655 { 1656 struct sighand_struct *sig; 1657 1658 if (clone_flags & CLONE_SIGHAND) { 1659 refcount_inc(¤t->sighand->count); 1660 return 0; 1661 } 1662 sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); 1663 RCU_INIT_POINTER(tsk->sighand, sig); 1664 if (!sig) 1665 return -ENOMEM; 1666 1667 refcount_set(&sig->count, 1); 1668 spin_lock_irq(¤t->sighand->siglock); 1669 memcpy(sig->action, current->sighand->action, sizeof(sig->action)); 1670 spin_unlock_irq(¤t->sighand->siglock); 1671 1672 /* Reset all signal handler not set to SIG_IGN to SIG_DFL. 
	if (clone_flags & CLONE_CLEAR_SIGHAND)
		flush_signal_handlers(tsk, 0);

	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (refcount_dec_and_test(&sighand->count)) {
		signalfd_cleanup(sighand);
		/*
		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
		 * without an RCU grace period, see __lock_task_sighand().
		 */
		kmem_cache_free(sighand_cachep, sighand);
	}
}

/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	struct posix_cputimers *pct = &sig->posix_cputimers;
	unsigned long cpu_limit;

	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	posix_cputimers_group_init(pct, cpu_limit);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	sig->quick_threads = 1;
	atomic_set(&sig->live, 1);
	refcount_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_HLIST_HEAD(&sig->multiprocess);
	seqlock_init(&sig->stats_lock);
	prev_cputime_init(&sig->prev_cputime);

#ifdef CONFIG_POSIX_TIMERS
	INIT_LIST_HEAD(&sig->posix_timers);
	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;
#endif

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);
	init_rwsem(&sig->exec_update_lock);

	return 0;
}

static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/*
	 * Must be called with sighand->lock held, which is common to
	 * all threads in the group. Holding cred_guard_mutex is not
	 * needed because this new task is not yet running and cannot
	 * be racing exec.
	 */
	assert_spin_locked(&current->sighand->siglock);

	/* Ref-count the new filter user, and assign it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/*
	 * Explicitly enable no_new_privs here in case it got set
	 * between the task_struct being duplicated and holding the
	 * sighand lock. The seccomp state and nnp must be in sync.
	 */
	if (task_no_new_privs(current))
		task_set_no_new_privs(p);

	/*
	 * If the parent gained a seccomp mode after the thread flags were
	 * copied and before we took the sighand lock, we have to manually
	 * enable the seccomp thread flag here.
	 */
	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
		set_task_syscall_work(p, SECCOMP);
#endif
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	p->pi_waiters = RB_ROOT_CACHED;
	p->pi_top_task = NULL;
	p->pi_blocked_on = NULL;
#endif
}

static inline void init_task_pid_links(struct task_struct *task)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_NODE(&task->pid_links[type]);
}

static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{
	if (type == PIDTYPE_PID)
		task->thread_pid = pid;
	else
		task->signal->pids[type] = pid;
}

static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	p->trc_reader_nesting = 0;
	p->trc_reader_special.s = 0;
	INIT_LIST_HEAD(&p->trc_holdout_list);
	INIT_LIST_HEAD(&p->trc_blkd_node);
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

struct pid *pidfd_pid(const struct file *file)
{
	if (file->f_op == &pidfd_fops)
		return file->private_data;

	return ERR_PTR(-EBADF);
}

static int pidfd_release(struct inode *inode, struct file *file)
{
	struct pid *pid = file->private_data;

	file->private_data = NULL;
	put_pid(pid);
	return 0;
}

#ifdef CONFIG_PROC_FS
/**
 * pidfd_show_fdinfo - print information about a pidfd
 * @m: proc fdinfo file
 * @f: file referencing a pidfd
 *
 * Pid:
 * This function will print the pid that a given pidfd refers to in the
 * pid namespace of the procfs instance.
 * If the pid namespace of the process is not a descendant of the pid
 * namespace of the procfs instance 0 will be shown as its pid. This is
 * similar to calling getppid() on a process whose parent is outside of
 * its pid namespace.
 *
 * NSpid:
 * If pid namespaces are supported then this function will also print
 * the pid that a given pidfd refers to for all descendant pid namespaces
 * starting from the current pid namespace of the instance, i.e. the
 * Pid field and the first entry in the NSpid field will be identical.
 * If the pid namespace of the process is not a descendant of the pid
 * namespace of the procfs instance 0 will be shown as its first NSpid
 * entry and no others will be shown.
 * Note that this differs from the Pid and NSpid fields in
 * /proc/<pid>/status where Pid and NSpid are always shown relative to
 * the pid namespace of the procfs instance. The difference becomes
 * obvious when sending around a pidfd between pid namespaces from a
 * different branch of the tree, i.e. where no ancestral relation is
 * present between the pid namespaces:
 * - create two new pid namespaces ns1 and ns2 in the initial pid
 *   namespace (also take care to create new mount namespaces in the
 *   new pid namespace and mount procfs)
 * - create a process with a pidfd in ns1
 * - send pidfd from ns1 to ns2
 * - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid
 *   have exactly one entry, which is 0
 */
static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct pid *pid = f->private_data;
	struct pid_namespace *ns;
	pid_t nr = -1;

	if (likely(pid_has_task(pid, PIDTYPE_PID))) {
		ns = proc_pid_ns(file_inode(m->file)->i_sb);
		nr = pid_nr_ns(pid, ns);
	}

	seq_put_decimal_ll(m, "Pid:\t", nr);

#ifdef CONFIG_PID_NS
	seq_put_decimal_ll(m, "\nNSpid:\t", nr);
	if (nr > 0) {
		int i;

		/* If nr is non-zero it means that 'pid' is valid and that
		 * ns, i.e. the pid namespace associated with the procfs
		 * instance, is in the pid namespace hierarchy of pid.
		 * Start at one below the already printed level.
		 */
		for (i = ns->level + 1; i <= pid->level; i++)
			seq_put_decimal_ll(m, "\t", pid->numbers[i].nr);
	}
#endif
	seq_putc(m, '\n');
}
#endif

/*
 * Poll support for process exit notification.
 */
static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
{
	struct pid *pid = file->private_data;
	__poll_t poll_flags = 0;

	poll_wait(file, &pid->wait_pidfd, pts);

	/*
	 * Inform pollers only when the whole thread group exits.
	 * If the thread group leader exits before all other threads in the
	 * group, then poll(2) should block, similar to the wait(2) family.
	 */
	if (thread_group_exited(pid))
		poll_flags = EPOLLIN | EPOLLRDNORM;

	return poll_flags;
}
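/*
 * Illustrative userspace counterpart (not kernel code): a process that
 * obtained a pidfd via clone(CLONE_PIDFD) or pidfd_open(2) can wait for
 * thread-group exit without blocking in the wait(2) family:
 *
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns once the whole group has exited
 */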
const struct file_operations pidfd_fops = {
	.release = pidfd_release,
	.poll = pidfd_poll,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = pidfd_show_fdinfo,
#endif
};

static void __delayed_free_task(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	free_task(tsk);
}

static __always_inline void delayed_free_task(struct task_struct *tsk)
{
	if (IS_ENABLED(CONFIG_MEMCG))
		call_rcu(&tsk->rcu, __delayed_free_task);
	else
		free_task(tsk);
}

static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
{
	/* Skip if kernel thread */
	if (!tsk->mm)
		return;

	/* Skip if spawning a thread or using vfork */
	if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
		return;

	/* We need to synchronize with __set_oom_adj */
	mutex_lock(&oom_adj_mutex);
	set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
	/* Update the values in case they were changed after copy_signal */
	tsk->signal->oom_score_adj = current->signal->oom_score_adj;
	tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
	mutex_unlock(&oom_adj_mutex);
}

#ifdef CONFIG_RV
static void rv_task_fork(struct task_struct *p)
{
	int i;

	for (i = 0; i < RV_PER_TASK_MONITORS; i++)
		p->rv[i].da_mon.monitoring = false;
}
#else
#define rv_task_fork(p) do {} while (0)
#endif

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static __latent_entropy struct task_struct *copy_process(
					struct pid *pid,
					int trace,
					int node,
					struct kernel_clone_args *args)
{
	int pidfd = -1, retval;
	struct task_struct *p;
	struct multiprocess_signals delayed;
	struct file *pidfile = NULL;
	const u64 clone_flags = args->flags;
	struct nsproxy *nsp = current->nsproxy;

	/*
	 * Don't allow sharing the root directory with processes in a different
	 * namespace
	 */
	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
2053 */ 2054 if ((clone_flags & CLONE_PARENT) && 2055 current->signal->flags & SIGNAL_UNKILLABLE) 2056 return ERR_PTR(-EINVAL); 2057 2058 /* 2059 * If the new process will be in a different pid or user namespace 2060 * do not allow it to share a thread group with the forking task. 2061 */ 2062 if (clone_flags & CLONE_THREAD) { 2063 if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || 2064 (task_active_pid_ns(current) != nsp->pid_ns_for_children)) 2065 return ERR_PTR(-EINVAL); 2066 } 2067 2068 if (clone_flags & CLONE_PIDFD) { 2069 /* 2070 * - CLONE_DETACHED is blocked so that we can potentially 2071 * reuse it later for CLONE_PIDFD. 2072 * - CLONE_THREAD is blocked until someone really needs it. 2073 */ 2074 if (clone_flags & (CLONE_DETACHED | CLONE_THREAD)) 2075 return ERR_PTR(-EINVAL); 2076 } 2077 2078 /* 2079 * Force any signals received before this point to be delivered 2080 * before the fork happens. Collect up signals sent to multiple 2081 * processes that happen during the fork and delay them so that 2082 * they appear to happen after the fork. 2083 */ 2084 sigemptyset(&delayed.signal); 2085 INIT_HLIST_NODE(&delayed.node); 2086 2087 spin_lock_irq(&current->sighand->siglock); 2088 if (!(clone_flags & CLONE_THREAD)) 2089 hlist_add_head(&delayed.node, &current->signal->multiprocess); 2090 recalc_sigpending(); 2091 spin_unlock_irq(&current->sighand->siglock); 2092 retval = -ERESTARTNOINTR; 2093 if (task_sigpending(current)) 2094 goto fork_out; 2095 2096 retval = -ENOMEM; 2097 p = dup_task_struct(current, node); 2098 if (!p) 2099 goto fork_out; 2100 p->flags &= ~PF_KTHREAD; 2101 if (args->kthread) 2102 p->flags |= PF_KTHREAD; 2103 if (args->io_thread) { 2104 /* 2105 * Mark us an IO worker, and block any signal that isn't 2106 * fatal or STOP 2107 */ 2108 p->flags |= PF_IO_WORKER; 2109 siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2110 } 2111 2112 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL; 2113 /* 2114 * Clear TID on mm_release()? 2115 */ 2116 p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL; 2117 2118 ftrace_graph_init_task(p); 2119 2120 rt_mutex_init_task(p); 2121 2122 lockdep_assert_irqs_enabled(); 2123 #ifdef CONFIG_PROVE_LOCKING 2124 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 2125 #endif 2126 retval = copy_creds(p, clone_flags); 2127 if (retval < 0) 2128 goto bad_fork_free; 2129 2130 retval = -EAGAIN; 2131 if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) { 2132 if (p->real_cred->user != INIT_USER && 2133 !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) 2134 goto bad_fork_cleanup_count; 2135 } 2136 current->flags &= ~PF_NPROC_EXCEEDED; 2137 2138 /* 2139 * If multiple threads are within copy_process(), then this check 2140 * triggers too late. This doesn't hurt, the check is only there 2141 * to stop root fork bombs.
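 *
 * (max_threads is tunable at runtime through /proc/sys/kernel/threads-max;
 * see sysctl_max_threads() at the bottom of this file.)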
2142 */ 2143 retval = -EAGAIN; 2144 if (data_race(nr_threads >= max_threads)) 2145 goto bad_fork_cleanup_count; 2146 2147 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 2148 p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY); 2149 p->flags |= PF_FORKNOEXEC; 2150 INIT_LIST_HEAD(&p->children); 2151 INIT_LIST_HEAD(&p->sibling); 2152 rcu_copy_process(p); 2153 p->vfork_done = NULL; 2154 spin_lock_init(&p->alloc_lock); 2155 2156 init_sigpending(&p->pending); 2157 2158 p->utime = p->stime = p->gtime = 0; 2159 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 2160 p->utimescaled = p->stimescaled = 0; 2161 #endif 2162 prev_cputime_init(&p->prev_cputime); 2163 2164 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 2165 seqcount_init(&p->vtime.seqcount); 2166 p->vtime.starttime = 0; 2167 p->vtime.state = VTIME_INACTIVE; 2168 #endif 2169 2170 #ifdef CONFIG_IO_URING 2171 p->io_uring = NULL; 2172 #endif 2173 2174 #if defined(SPLIT_RSS_COUNTING) 2175 memset(&p->rss_stat, 0, sizeof(p->rss_stat)); 2176 #endif 2177 2178 p->default_timer_slack_ns = current->timer_slack_ns; 2179 2180 #ifdef CONFIG_PSI 2181 p->psi_flags = 0; 2182 #endif 2183 2184 task_io_accounting_init(&p->ioac); 2185 acct_clear_integrals(p); 2186 2187 posix_cputimers_init(&p->posix_cputimers); 2188 2189 p->io_context = NULL; 2190 audit_set_context(p, NULL); 2191 cgroup_fork(p); 2192 if (args->kthread) { 2193 if (!set_kthread_struct(p)) 2194 goto bad_fork_cleanup_delayacct; 2195 } 2196 #ifdef CONFIG_NUMA 2197 p->mempolicy = mpol_dup(p->mempolicy); 2198 if (IS_ERR(p->mempolicy)) { 2199 retval = PTR_ERR(p->mempolicy); 2200 p->mempolicy = NULL; 2201 goto bad_fork_cleanup_delayacct; 2202 } 2203 #endif 2204 #ifdef CONFIG_CPUSETS 2205 p->cpuset_mem_spread_rotor = NUMA_NO_NODE; 2206 p->cpuset_slab_spread_rotor = NUMA_NO_NODE; 2207 seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock); 2208 #endif 2209 #ifdef CONFIG_TRACE_IRQFLAGS 2210 memset(&p->irqtrace, 0, sizeof(p->irqtrace)); 2211 p->irqtrace.hardirq_disable_ip = _THIS_IP_; 2212 p->irqtrace.softirq_enable_ip = _THIS_IP_; 2213 p->softirqs_enabled = 1; 2214 p->softirq_context = 0; 2215 #endif 2216 2217 p->pagefault_disabled = 0; 2218 2219 #ifdef CONFIG_LOCKDEP 2220 lockdep_init_task(p); 2221 #endif 2222 2223 #ifdef CONFIG_DEBUG_MUTEXES 2224 p->blocked_on = NULL; /* not blocked yet */ 2225 #endif 2226 #ifdef CONFIG_BCACHE 2227 p->sequential_io = 0; 2228 p->sequential_io_avg = 0; 2229 #endif 2230 #ifdef CONFIG_BPF_SYSCALL 2231 RCU_INIT_POINTER(p->bpf_storage, NULL); 2232 p->bpf_ctx = NULL; 2233 #endif 2234 2235 /* Perform scheduler related setup. Assign this task to a CPU. 
*/ 2236 retval = sched_fork(clone_flags, p); 2237 if (retval) 2238 goto bad_fork_cleanup_policy; 2239 2240 retval = perf_event_init_task(p, clone_flags); 2241 if (retval) 2242 goto bad_fork_cleanup_policy; 2243 retval = audit_alloc(p); 2244 if (retval) 2245 goto bad_fork_cleanup_perf; 2246 /* copy all the process information */ 2247 shm_init_task(p); 2248 retval = security_task_alloc(p, clone_flags); 2249 if (retval) 2250 goto bad_fork_cleanup_audit; 2251 retval = copy_semundo(clone_flags, p); 2252 if (retval) 2253 goto bad_fork_cleanup_security; 2254 retval = copy_files(clone_flags, p); 2255 if (retval) 2256 goto bad_fork_cleanup_semundo; 2257 retval = copy_fs(clone_flags, p); 2258 if (retval) 2259 goto bad_fork_cleanup_files; 2260 retval = copy_sighand(clone_flags, p); 2261 if (retval) 2262 goto bad_fork_cleanup_fs; 2263 retval = copy_signal(clone_flags, p); 2264 if (retval) 2265 goto bad_fork_cleanup_sighand; 2266 retval = copy_mm(clone_flags, p); 2267 if (retval) 2268 goto bad_fork_cleanup_signal; 2269 retval = copy_namespaces(clone_flags, p); 2270 if (retval) 2271 goto bad_fork_cleanup_mm; 2272 retval = copy_io(clone_flags, p); 2273 if (retval) 2274 goto bad_fork_cleanup_namespaces; 2275 retval = copy_thread(p, args); 2276 if (retval) 2277 goto bad_fork_cleanup_io; 2278 2279 stackleak_task_init(p); 2280 2281 if (pid != &init_struct_pid) { 2282 pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid, 2283 args->set_tid_size); 2284 if (IS_ERR(pid)) { 2285 retval = PTR_ERR(pid); 2286 goto bad_fork_cleanup_thread; 2287 } 2288 } 2289 2290 /* 2291 * This has to happen after we've potentially unshared the file 2292 * descriptor table (so that the pidfd doesn't leak into the child 2293 * if the fd table isn't shared). 2294 */ 2295 if (clone_flags & CLONE_PIDFD) { 2296 retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC); 2297 if (retval < 0) 2298 goto bad_fork_free_pid; 2299 2300 pidfd = retval; 2301 2302 pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid, 2303 O_RDWR | O_CLOEXEC); 2304 if (IS_ERR(pidfile)) { 2305 put_unused_fd(pidfd); 2306 retval = PTR_ERR(pidfile); 2307 goto bad_fork_free_pid; 2308 } 2309 get_pid(pid); /* held by pidfile now */ 2310 2311 retval = put_user(pidfd, args->pidfd); 2312 if (retval) 2313 goto bad_fork_put_pidfd; 2314 } 2315 2316 #ifdef CONFIG_BLOCK 2317 p->plug = NULL; 2318 #endif 2319 futex_init_task(p); 2320 2321 /* 2322 * sigaltstack should be cleared when sharing the same VM 2323 */ 2324 if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) 2325 sas_ss_reset(p); 2326 2327 /* 2328 * Syscall tracing and stepping should be turned off in the 2329 * child regardless of CLONE_PTRACE. 2330 */ 2331 user_disable_single_step(p); 2332 clear_task_syscall_work(p, SYSCALL_TRACE); 2333 #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU) 2334 clear_task_syscall_work(p, SYSCALL_EMU); 2335 #endif 2336 clear_tsk_latency_tracing(p); 2337 2338 /* ok, now we should be set up.. 
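 * Every remaining failure path unwinds through the bad_fork_* labels
 * at the end of this function, until the "No more failure paths"
 * point below is reached.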
*/ 2339 p->pid = pid_nr(pid); 2340 if (clone_flags & CLONE_THREAD) { 2341 p->group_leader = current->group_leader; 2342 p->tgid = current->tgid; 2343 } else { 2344 p->group_leader = p; 2345 p->tgid = p->pid; 2346 } 2347 2348 p->nr_dirtied = 0; 2349 p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); 2350 p->dirty_paused_when = 0; 2351 2352 p->pdeath_signal = 0; 2353 INIT_LIST_HEAD(&p->thread_group); 2354 p->task_works = NULL; 2355 clear_posix_cputimers_work(p); 2356 2357 #ifdef CONFIG_KRETPROBES 2358 p->kretprobe_instances.first = NULL; 2359 #endif 2360 #ifdef CONFIG_RETHOOK 2361 p->rethooks.first = NULL; 2362 #endif 2363 2364 /* 2365 * Ensure that the cgroup subsystem policies allow the new process to be 2366 * forked. It should be noted that the new process's css_set can be changed 2367 * between here and cgroup_post_fork() if an organisation operation is in 2368 * progress. 2369 */ 2370 retval = cgroup_can_fork(p, args); 2371 if (retval) 2372 goto bad_fork_put_pidfd; 2373 2374 /* 2375 * Now that the cgroups are pinned, re-clone the parent cgroup and put 2376 * the new task on the correct runqueue. All this *before* the task 2377 * becomes visible. 2378 * 2379 * This isn't part of ->can_fork() because while the re-cloning is 2380 * cgroup specific, it unconditionally needs to place the task on a 2381 * runqueue. 2382 */ 2383 sched_cgroup_fork(p, args); 2384 2385 /* 2386 * From this point on we must avoid any synchronous user-space 2387 * communication until we take the tasklist-lock. In particular, we do 2388 * not want user-space to be able to predict the process start-time by 2389 * stalling fork(2) after we recorded the start_time but before it is 2390 * visible to the system. 2391 */ 2392 2393 p->start_time = ktime_get_ns(); 2394 p->start_boottime = ktime_get_boottime_ns(); 2395 2396 /* 2397 * Make it visible to the rest of the system, but don't wake it up yet. 2398 * Need tasklist lock for parent etc handling! 2399 */ 2400 write_lock_irq(&tasklist_lock); 2401 2402 /* CLONE_PARENT re-uses the old parent */ 2403 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { 2404 p->real_parent = current->real_parent; 2405 p->parent_exec_id = current->parent_exec_id; 2406 if (clone_flags & CLONE_THREAD) 2407 p->exit_signal = -1; 2408 else 2409 p->exit_signal = current->group_leader->exit_signal; 2410 } else { 2411 p->real_parent = current; 2412 p->parent_exec_id = current->self_exec_id; 2413 p->exit_signal = args->exit_signal; 2414 } 2415 2416 klp_copy_process(p); 2417 2418 sched_core_fork(p); 2419 2420 spin_lock(&current->sighand->siglock); 2421 2422 rv_task_fork(p); 2423 2424 rseq_fork(p, clone_flags); 2425 2426 /* Don't start children in a dying pid namespace */ 2427 if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { 2428 retval = -ENOMEM; 2429 goto bad_fork_cancel_cgroup; 2430 } 2431 2432 /* Let kill terminate clone/fork in the middle */ 2433 if (fatal_signal_pending(current)) { 2434 retval = -EINTR; 2435 goto bad_fork_cancel_cgroup; 2436 } 2437 2438 /* No more failure paths after this point. */ 2439 2440 /* 2441 * Copy seccomp details explicitly here, in case they were changed 2442 * before holding sighand lock.
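 * A sibling thread may, for instance, have installed a new filter via
 * seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, ...) in
 * the window before the lock was taken.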
2443 */ 2444 copy_seccomp(p); 2445 2446 init_task_pid_links(p); 2447 if (likely(p->pid)) { 2448 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); 2449 2450 init_task_pid(p, PIDTYPE_PID, pid); 2451 if (thread_group_leader(p)) { 2452 init_task_pid(p, PIDTYPE_TGID, pid); 2453 init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); 2454 init_task_pid(p, PIDTYPE_SID, task_session(current)); 2455 2456 if (is_child_reaper(pid)) { 2457 ns_of_pid(pid)->child_reaper = p; 2458 p->signal->flags |= SIGNAL_UNKILLABLE; 2459 } 2460 p->signal->shared_pending.signal = delayed.signal; 2461 p->signal->tty = tty_kref_get(current->signal->tty); 2462 /* 2463 * Inherit has_child_subreaper flag under the same 2464 * tasklist_lock with adding child to the process tree 2465 * for propagate_has_child_subreaper optimization. 2466 */ 2467 p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || 2468 p->real_parent->signal->is_child_subreaper; 2469 list_add_tail(&p->sibling, &p->real_parent->children); 2470 list_add_tail_rcu(&p->tasks, &init_task.tasks); 2471 attach_pid(p, PIDTYPE_TGID); 2472 attach_pid(p, PIDTYPE_PGID); 2473 attach_pid(p, PIDTYPE_SID); 2474 __this_cpu_inc(process_counts); 2475 } else { 2476 current->signal->nr_threads++; 2477 current->signal->quick_threads++; 2478 atomic_inc(&current->signal->live); 2479 refcount_inc(&current->signal->sigcnt); 2480 task_join_group_stop(p); 2481 list_add_tail_rcu(&p->thread_group, 2482 &p->group_leader->thread_group); 2483 list_add_tail_rcu(&p->thread_node, 2484 &p->signal->thread_head); 2485 } 2486 attach_pid(p, PIDTYPE_PID); 2487 nr_threads++; 2488 } 2489 total_forks++; 2490 hlist_del_init(&delayed.node); 2491 spin_unlock(&current->sighand->siglock); 2492 syscall_tracepoint_update(p); 2493 write_unlock_irq(&tasklist_lock); 2494 2495 if (pidfile) 2496 fd_install(pidfd, pidfile); 2497 2498 proc_fork_connector(p); 2499 sched_post_fork(p); 2500 cgroup_post_fork(p, args); 2501 perf_event_fork(p); 2502 2503 trace_task_newtask(p, clone_flags); 2504 uprobe_copy_process(p, clone_flags); 2505 2506 copy_oom_score_adj(clone_flags, p); 2507 2508 return p; 2509 2510 bad_fork_cancel_cgroup: 2511 sched_core_free(p); 2512 spin_unlock(&current->sighand->siglock); 2513 write_unlock_irq(&tasklist_lock); 2514 cgroup_cancel_fork(p, args); 2515 bad_fork_put_pidfd: 2516 if (clone_flags & CLONE_PIDFD) { 2517 fput(pidfile); 2518 put_unused_fd(pidfd); 2519 } 2520 bad_fork_free_pid: 2521 if (pid != &init_struct_pid) 2522 free_pid(pid); 2523 bad_fork_cleanup_thread: 2524 exit_thread(p); 2525 bad_fork_cleanup_io: 2526 if (p->io_context) 2527 exit_io_context(p); 2528 bad_fork_cleanup_namespaces: 2529 exit_task_namespaces(p); 2530 bad_fork_cleanup_mm: 2531 if (p->mm) { 2532 mm_clear_owner(p->mm, p); 2533 mmput(p->mm); 2534 } 2535 bad_fork_cleanup_signal: 2536 if (!(clone_flags & CLONE_THREAD)) 2537 free_signal_struct(p->signal); 2538 bad_fork_cleanup_sighand: 2539 __cleanup_sighand(p->sighand); 2540 bad_fork_cleanup_fs: 2541 exit_fs(p); /* blocking */ 2542 bad_fork_cleanup_files: 2543 exit_files(p); /* blocking */ 2544 bad_fork_cleanup_semundo: 2545 exit_sem(p); 2546 bad_fork_cleanup_security: 2547 security_task_free(p); 2548 bad_fork_cleanup_audit: 2549 audit_free(p); 2550 bad_fork_cleanup_perf: 2551 perf_event_free_task(p); 2552 bad_fork_cleanup_policy: 2553 lockdep_free_task(p); 2554 #ifdef CONFIG_NUMA 2555 mpol_put(p->mempolicy); 2556 #endif 2557 bad_fork_cleanup_delayacct: 2558 delayacct_tsk_free(p); 2559 bad_fork_cleanup_count: 2560 dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC,
1); 2561 exit_creds(p); 2562 bad_fork_free: 2563 WRITE_ONCE(p->__state, TASK_DEAD); 2564 exit_task_stack_account(p); 2565 put_task_stack(p); 2566 delayed_free_task(p); 2567 fork_out: 2568 spin_lock_irq(&current->sighand->siglock); 2569 hlist_del_init(&delayed.node); 2570 spin_unlock_irq(&current->sighand->siglock); 2571 return ERR_PTR(retval); 2572 } 2573 2574 static inline void init_idle_pids(struct task_struct *idle) 2575 { 2576 enum pid_type type; 2577 2578 for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { 2579 INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */ 2580 init_task_pid(idle, type, &init_struct_pid); 2581 } 2582 } 2583 2584 static int idle_dummy(void *dummy) 2585 { 2586 /* This function is never called */ 2587 return 0; 2588 } 2589 2590 struct task_struct * __init fork_idle(int cpu) 2591 { 2592 struct task_struct *task; 2593 struct kernel_clone_args args = { 2594 .flags = CLONE_VM, 2595 .fn = &idle_dummy, 2596 .fn_arg = NULL, 2597 .kthread = 1, 2598 .idle = 1, 2599 }; 2600 2601 task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args); 2602 if (!IS_ERR(task)) { 2603 init_idle_pids(task); 2604 init_idle(task, cpu); 2605 } 2606 2607 return task; 2608 } 2609 2610 /* 2611 * This is like kernel_clone(), but shaved down and tailored to just 2612 * creating io_uring workers. It returns a created task, or an error pointer. 2613 * The returned task is inactive, and the caller must fire it up through 2614 * wake_up_new_task(p). All signals are blocked in the created task. 2615 */ 2616 struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) 2617 { 2618 unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| 2619 CLONE_IO; 2620 struct kernel_clone_args args = { 2621 .flags = ((lower_32_bits(flags) | CLONE_VM | 2622 CLONE_UNTRACED) & ~CSIGNAL), 2623 .exit_signal = (lower_32_bits(flags) & CSIGNAL), 2624 .fn = fn, 2625 .fn_arg = arg, 2626 .io_thread = 1, 2627 }; 2628 2629 return copy_process(NULL, 0, node, &args); 2630 } 2631 2632 /* 2633 * Ok, this is the main fork-routine. 2634 * 2635 * It copies the process, and if successful kick-starts 2636 * it and waits for it to finish using the VM if required. 2637 * 2638 * args->exit_signal is expected to be checked for sanity by the caller. 2639 */ 2640 pid_t kernel_clone(struct kernel_clone_args *args) 2641 { 2642 u64 clone_flags = args->flags; 2643 struct completion vfork; 2644 struct pid *pid; 2645 struct task_struct *p; 2646 int trace = 0; 2647 pid_t nr; 2648 2649 /* 2650 * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument 2651 * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are 2652 * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate 2653 * field in struct clone_args and it still doesn't make sense to have 2654 * them both point at the same memory location. Performing this check 2655 * here has the advantage that we don't need to have a separate helper 2656 * to check for legacy clone(). 2657 */ 2658 if ((args->flags & CLONE_PIDFD) && 2659 (args->flags & CLONE_PARENT_SETTID) && 2660 (args->pidfd == args->parent_tid)) 2661 return -EINVAL; 2662 2663 /* 2664 * Determine whether and which event to report to ptracer. When 2665 * called from kernel_thread or CLONE_UNTRACED is explicitly 2666 * requested, no event is reported; otherwise, report if the event 2667 * for the type of forking is enabled.
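 *
 * Concretely: CLONE_VFORK reports PTRACE_EVENT_VFORK, an exit signal
 * other than SIGCHLD reports PTRACE_EVENT_CLONE, and a plain fork-style
 * call reports PTRACE_EVENT_FORK, as selected below.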
2668 */ 2669 if (!(clone_flags & CLONE_UNTRACED)) { 2670 if (clone_flags & CLONE_VFORK) 2671 trace = PTRACE_EVENT_VFORK; 2672 else if (args->exit_signal != SIGCHLD) 2673 trace = PTRACE_EVENT_CLONE; 2674 else 2675 trace = PTRACE_EVENT_FORK; 2676 2677 if (likely(!ptrace_event_enabled(current, trace))) 2678 trace = 0; 2679 } 2680 2681 p = copy_process(NULL, trace, NUMA_NO_NODE, args); 2682 add_latent_entropy(); 2683 2684 if (IS_ERR(p)) 2685 return PTR_ERR(p); 2686 2687 /* 2688 * Do this prior to waking up the new thread - the thread pointer 2689 * might become invalid after that point, if the thread exits quickly. 2690 */ 2691 trace_sched_process_fork(current, p); 2692 2693 pid = get_task_pid(p, PIDTYPE_PID); 2694 nr = pid_vnr(pid); 2695 2696 if (clone_flags & CLONE_PARENT_SETTID) 2697 put_user(nr, args->parent_tid); 2698 2699 if (clone_flags & CLONE_VFORK) { 2700 p->vfork_done = &vfork; 2701 init_completion(&vfork); 2702 get_task_struct(p); 2703 } 2704 2705 if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) { 2706 /* lock the task to synchronize with memcg migration */ 2707 task_lock(p); 2708 lru_gen_add_mm(p->mm); 2709 task_unlock(p); 2710 } 2711 2712 wake_up_new_task(p); 2713 2714 /* forking complete and child started to run, tell ptracer */ 2715 if (unlikely(trace)) 2716 ptrace_event_pid(trace, pid); 2717 2718 if (clone_flags & CLONE_VFORK) { 2719 if (!wait_for_vfork_done(p, &vfork)) 2720 ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); 2721 } 2722 2723 put_pid(pid); 2724 return nr; 2725 } 2726 2727 /* 2728 * Create a kernel thread. 2729 */ 2730 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 2731 { 2732 struct kernel_clone_args args = { 2733 .flags = ((lower_32_bits(flags) | CLONE_VM | 2734 CLONE_UNTRACED) & ~CSIGNAL), 2735 .exit_signal = (lower_32_bits(flags) & CSIGNAL), 2736 .fn = fn, 2737 .fn_arg = arg, 2738 .kthread = 1, 2739 }; 2740 2741 return kernel_clone(&args); 2742 } 2743 2744 /* 2745 * Create a user mode thread.
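 * Unlike kernel_thread() above, this leaves args.kthread unset, so the
 * new task does not get PF_KTHREAD and may return to user mode; the
 * kernel uses this e.g. to start init.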
2746 */ 2747 pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags) 2748 { 2749 struct kernel_clone_args args = { 2750 .flags = ((lower_32_bits(flags) | CLONE_VM | 2751 CLONE_UNTRACED) & ~CSIGNAL), 2752 .exit_signal = (lower_32_bits(flags) & CSIGNAL), 2753 .fn = fn, 2754 .fn_arg = arg, 2755 }; 2756 2757 return kernel_clone(&args); 2758 } 2759 2760 #ifdef __ARCH_WANT_SYS_FORK 2761 SYSCALL_DEFINE0(fork) 2762 { 2763 #ifdef CONFIG_MMU 2764 struct kernel_clone_args args = { 2765 .exit_signal = SIGCHLD, 2766 }; 2767 2768 return kernel_clone(&args); 2769 #else 2770 /* cannot be supported in nommu mode */ 2771 return -EINVAL; 2772 #endif 2773 } 2774 #endif 2775 2776 #ifdef __ARCH_WANT_SYS_VFORK 2777 SYSCALL_DEFINE0(vfork) 2778 { 2779 struct kernel_clone_args args = { 2780 .flags = CLONE_VFORK | CLONE_VM, 2781 .exit_signal = SIGCHLD, 2782 }; 2783 2784 return kernel_clone(&args); 2785 } 2786 #endif 2787 2788 #ifdef __ARCH_WANT_SYS_CLONE 2789 #ifdef CONFIG_CLONE_BACKWARDS 2790 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2791 int __user *, parent_tidptr, 2792 unsigned long, tls, 2793 int __user *, child_tidptr) 2794 #elif defined(CONFIG_CLONE_BACKWARDS2) 2795 SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, 2796 int __user *, parent_tidptr, 2797 int __user *, child_tidptr, 2798 unsigned long, tls) 2799 #elif defined(CONFIG_CLONE_BACKWARDS3) 2800 SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, 2801 int, stack_size, 2802 int __user *, parent_tidptr, 2803 int __user *, child_tidptr, 2804 unsigned long, tls) 2805 #else 2806 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2807 int __user *, parent_tidptr, 2808 int __user *, child_tidptr, 2809 unsigned long, tls) 2810 #endif 2811 { 2812 struct kernel_clone_args args = { 2813 .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), 2814 .pidfd = parent_tidptr, 2815 .child_tid = child_tidptr, 2816 .parent_tid = parent_tidptr, 2817 .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), 2818 .stack = newsp, 2819 .tls = tls, 2820 }; 2821 2822 return kernel_clone(&args); 2823 } 2824 #endif 2825 2826 #ifdef __ARCH_WANT_SYS_CLONE3 2827 2828 noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, 2829 struct clone_args __user *uargs, 2830 size_t usize) 2831 { 2832 int err; 2833 struct clone_args args; 2834 pid_t *kset_tid = kargs->set_tid; 2835 2836 BUILD_BUG_ON(offsetofend(struct clone_args, tls) != 2837 CLONE_ARGS_SIZE_VER0); 2838 BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) != 2839 CLONE_ARGS_SIZE_VER1); 2840 BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) != 2841 CLONE_ARGS_SIZE_VER2); 2842 BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2); 2843 2844 if (unlikely(usize > PAGE_SIZE)) 2845 return -E2BIG; 2846 if (unlikely(usize < CLONE_ARGS_SIZE_VER0)) 2847 return -EINVAL; 2848 2849 err = copy_struct_from_user(&args, sizeof(args), uargs, usize); 2850 if (err) 2851 return err; 2852 2853 if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL)) 2854 return -EINVAL; 2855 2856 if (unlikely(!args.set_tid && args.set_tid_size > 0)) 2857 return -EINVAL; 2858 2859 if (unlikely(args.set_tid && args.set_tid_size == 0)) 2860 return -EINVAL; 2861 2862 /* 2863 * Verify that the upper 32 bits of exit_signal are unset and that 2864 * it is a valid signal 2865 */ 2866 if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) || 2867 !valid_signal(args.exit_signal))) 2868 return -EINVAL; 2869 2870 if ((args.flags & CLONE_INTO_CGROUP) &&
2871 (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2)) 2872 return -EINVAL; 2873 2874 *kargs = (struct kernel_clone_args){ 2875 .flags = args.flags, 2876 .pidfd = u64_to_user_ptr(args.pidfd), 2877 .child_tid = u64_to_user_ptr(args.child_tid), 2878 .parent_tid = u64_to_user_ptr(args.parent_tid), 2879 .exit_signal = args.exit_signal, 2880 .stack = args.stack, 2881 .stack_size = args.stack_size, 2882 .tls = args.tls, 2883 .set_tid_size = args.set_tid_size, 2884 .cgroup = args.cgroup, 2885 }; 2886 2887 if (args.set_tid && 2888 copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid), 2889 (kargs->set_tid_size * sizeof(pid_t)))) 2890 return -EFAULT; 2891 2892 kargs->set_tid = kset_tid; 2893 2894 return 0; 2895 } 2896 2897 /** 2898 * clone3_stack_valid - check and prepare stack 2899 * @kargs: kernel clone args 2900 * 2901 * Verify that the stack arguments userspace gave us are sane. 2902 * In addition, set the stack direction for userspace since it's easy for us to 2903 * determine. 2904 */ 2905 static inline bool clone3_stack_valid(struct kernel_clone_args *kargs) 2906 { 2907 if (kargs->stack == 0) { 2908 if (kargs->stack_size > 0) 2909 return false; 2910 } else { 2911 if (kargs->stack_size == 0) 2912 return false; 2913 2914 if (!access_ok((void __user *)kargs->stack, kargs->stack_size)) 2915 return false; 2916 2917 #if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64) 2918 kargs->stack += kargs->stack_size; 2919 #endif 2920 } 2921 2922 return true; 2923 } 2924 2925 static bool clone3_args_valid(struct kernel_clone_args *kargs) 2926 { 2927 /* Verify that no unknown flags are passed along. */ 2928 if (kargs->flags & 2929 ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP)) 2930 return false; 2931 2932 /* 2933 * - make the CLONE_DETACHED bit reusable for clone3 2934 * - make the CSIGNAL bits reusable for clone3 2935 */ 2936 if (kargs->flags & (CLONE_DETACHED | CSIGNAL)) 2937 return false; 2938 2939 if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) == 2940 (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) 2941 return false; 2942 2943 if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) && 2944 kargs->exit_signal) 2945 return false; 2946 2947 if (!clone3_stack_valid(kargs)) 2948 return false; 2949 2950 return true; 2951 } 2952 2953 /** 2954 * clone3 - create a new process with specific properties 2955 * @uargs: argument structure 2956 * @size: size of @uargs 2957 * 2958 * clone3() is the extensible successor to clone()/clone2(). 2959 * It takes a struct as argument that is versioned by its size. 2960 * 2961 * Return: On success, a positive PID for the child process. 2962 * On error, a negative errno number. 
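 *
 * Illustrative userspace sketch (not part of the original file): glibc
 * provides no clone3() wrapper at the time of writing, so a raw
 * syscall(2) is used; 'pidfd' is a caller-provided int:
 *
 *	struct clone_args ca = {
 *		.flags		= CLONE_PIDFD,
 *		.pidfd		= (__u64)(uintptr_t)&pidfd,
 *		.exit_signal	= SIGCHLD,
 *	};
 *	pid_t child = syscall(__NR_clone3, &ca, sizeof(ca));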
2963 */ 2964 SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size) 2965 { 2966 int err; 2967 2968 struct kernel_clone_args kargs; 2969 pid_t set_tid[MAX_PID_NS_LEVEL]; 2970 2971 kargs.set_tid = set_tid; 2972 2973 err = copy_clone_args_from_user(&kargs, uargs, size); 2974 if (err) 2975 return err; 2976 2977 if (!clone3_args_valid(&kargs)) 2978 return -EINVAL; 2979 2980 return kernel_clone(&kargs); 2981 } 2982 #endif 2983 2984 void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) 2985 { 2986 struct task_struct *leader, *parent, *child; 2987 int res; 2988 2989 read_lock(&tasklist_lock); 2990 leader = top = top->group_leader; 2991 down: 2992 for_each_thread(leader, parent) { 2993 list_for_each_entry(child, &parent->children, sibling) { 2994 res = visitor(child, data); 2995 if (res) { 2996 if (res < 0) 2997 goto out; 2998 leader = child; 2999 goto down; 3000 } 3001 up: 3002 ; 3003 } 3004 } 3005 3006 if (leader != top) { 3007 child = leader; 3008 parent = child->real_parent; 3009 leader = parent->group_leader; 3010 goto up; 3011 } 3012 out: 3013 read_unlock(&tasklist_lock); 3014 } 3015 3016 #ifndef ARCH_MIN_MMSTRUCT_ALIGN 3017 #define ARCH_MIN_MMSTRUCT_ALIGN 0 3018 #endif 3019 3020 static void sighand_ctor(void *data) 3021 { 3022 struct sighand_struct *sighand = data; 3023 3024 spin_lock_init(&sighand->siglock); 3025 init_waitqueue_head(&sighand->signalfd_wqh); 3026 } 3027 3028 void __init mm_cache_init(void) 3029 { 3030 unsigned int mm_size; 3031 3032 /* 3033 * The mm_cpumask is located at the end of mm_struct, and is 3034 * dynamically sized based on the maximum CPU number this system 3035 * can have, taking hotplug into account (nr_cpu_ids). 3036 */ 3037 mm_size = sizeof(struct mm_struct) + cpumask_size(); 3038 3039 mm_cachep = kmem_cache_create_usercopy("mm_struct", 3040 mm_size, ARCH_MIN_MMSTRUCT_ALIGN, 3041 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 3042 offsetof(struct mm_struct, saved_auxv), 3043 sizeof_field(struct mm_struct, saved_auxv), 3044 NULL); 3045 } 3046 3047 void __init proc_caches_init(void) 3048 { 3049 sighand_cachep = kmem_cache_create("sighand_cache", 3050 sizeof(struct sighand_struct), 0, 3051 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| 3052 SLAB_ACCOUNT, sighand_ctor); 3053 signal_cachep = kmem_cache_create("signal_cache", 3054 sizeof(struct signal_struct), 0, 3055 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 3056 NULL); 3057 files_cachep = kmem_cache_create("files_cache", 3058 sizeof(struct files_struct), 0, 3059 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 3060 NULL); 3061 fs_cachep = kmem_cache_create("fs_cache", 3062 sizeof(struct fs_struct), 0, 3063 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, 3064 NULL); 3065 3066 vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); 3067 mmap_init(); 3068 nsproxy_cache_init(); 3069 } 3070 3071 /* 3072 * Check constraints on flags passed to the unshare system call. 3073 */ 3074 static int check_unshare_flags(unsigned long unshare_flags) 3075 { 3076 if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| 3077 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| 3078 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| 3079 CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP| 3080 CLONE_NEWTIME)) 3081 return -EINVAL; 3082 /* 3083 * Not implemented, but pretend it works if there is nothing 3084 * to unshare. Note that unsharing the address space or the 3085 * signal handlers also needs to unshare the signal queues (aka 3086 * CLONE_THREAD).
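 *
 * For example, unshare(CLONE_VM) from a single-threaded process
 * returns 0 without unsharing anything, while the same call from a
 * multithreaded process fails with -EINVAL.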
3087 */ 3088 if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { 3089 if (!thread_group_empty(current)) 3090 return -EINVAL; 3091 } 3092 if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { 3093 if (refcount_read(&current->sighand->count) > 1) 3094 return -EINVAL; 3095 } 3096 if (unshare_flags & CLONE_VM) { 3097 if (!current_is_single_threaded()) 3098 return -EINVAL; 3099 } 3100 3101 return 0; 3102 } 3103 3104 /* 3105 * Unshare the filesystem structure if it is being shared 3106 */ 3107 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) 3108 { 3109 struct fs_struct *fs = current->fs; 3110 3111 if (!(unshare_flags & CLONE_FS) || !fs) 3112 return 0; 3113 3114 /* don't need lock here; in the worst case we'll do useless copy */ 3115 if (fs->users == 1) 3116 return 0; 3117 3118 *new_fsp = copy_fs_struct(fs); 3119 if (!*new_fsp) 3120 return -ENOMEM; 3121 3122 return 0; 3123 } 3124 3125 /* 3126 * Unshare file descriptor table if it is being shared 3127 */ 3128 int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, 3129 struct files_struct **new_fdp) 3130 { 3131 struct files_struct *fd = current->files; 3132 int error = 0; 3133 3134 if ((unshare_flags & CLONE_FILES) && 3135 (fd && atomic_read(&fd->count) > 1)) { 3136 *new_fdp = dup_fd(fd, max_fds, &error); 3137 if (!*new_fdp) 3138 return error; 3139 } 3140 3141 return 0; 3142 } 3143 3144 /* 3145 * unshare allows a process to 'unshare' part of the process 3146 * context which was originally shared using clone. copy_* 3147 * functions used by kernel_clone() cannot be used here directly 3148 * because they modify an inactive task_struct that is being 3149 * constructed. Here we are modifying the current, active, 3150 * task_struct. 3151 */ 3152 int ksys_unshare(unsigned long unshare_flags) 3153 { 3154 struct fs_struct *fs, *new_fs = NULL; 3155 struct files_struct *new_fd = NULL; 3156 struct cred *new_cred = NULL; 3157 struct nsproxy *new_nsproxy = NULL; 3158 int do_sysvsem = 0; 3159 int err; 3160 3161 /* 3162 * If unsharing a user namespace, must also unshare the thread group 3163 * and unshare the filesystem root and working directories. 3164 */ 3165 if (unshare_flags & CLONE_NEWUSER) 3166 unshare_flags |= CLONE_THREAD | CLONE_FS; 3167 /* 3168 * If unsharing vm, must also unshare signal handlers. 3169 */ 3170 if (unshare_flags & CLONE_VM) 3171 unshare_flags |= CLONE_SIGHAND; 3172 /* 3173 * If unsharing signal handlers, must also unshare the signal queues. 3174 */ 3175 if (unshare_flags & CLONE_SIGHAND) 3176 unshare_flags |= CLONE_THREAD; 3177 /* 3178 * If unsharing namespace, must also unshare filesystem information. 3179 */ 3180 if (unshare_flags & CLONE_NEWNS) 3181 unshare_flags |= CLONE_FS; 3182 3183 err = check_unshare_flags(unshare_flags); 3184 if (err) 3185 goto bad_unshare_out; 3186 /* 3187 * CLONE_NEWIPC must also detach from the undolist: after switching 3188 * to a new ipc namespace, the semaphore arrays from the old 3189 * namespace are unreachable.
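 * That is why CLONE_NEWIPC, like CLONE_SYSVSEM, sets do_sysvsem below
 * and thus ends up calling exit_sem(current).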
3190 */ 3191 if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) 3192 do_sysvsem = 1; 3193 err = unshare_fs(unshare_flags, &new_fs); 3194 if (err) 3195 goto bad_unshare_out; 3196 err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd); 3197 if (err) 3198 goto bad_unshare_cleanup_fs; 3199 err = unshare_userns(unshare_flags, &new_cred); 3200 if (err) 3201 goto bad_unshare_cleanup_fd; 3202 err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, 3203 new_cred, new_fs); 3204 if (err) 3205 goto bad_unshare_cleanup_cred; 3206 3207 if (new_cred) { 3208 err = set_cred_ucounts(new_cred); 3209 if (err) 3210 goto bad_unshare_cleanup_cred; 3211 } 3212 3213 if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { 3214 if (do_sysvsem) { 3215 /* 3216 * CLONE_SYSVSEM is equivalent to sys_exit(). 3217 */ 3218 exit_sem(current); 3219 } 3220 if (unshare_flags & CLONE_NEWIPC) { 3221 /* Orphan segments in old ns (see sem above). */ 3222 exit_shm(current); 3223 shm_init_task(current); 3224 } 3225 3226 if (new_nsproxy) 3227 switch_task_namespaces(current, new_nsproxy); 3228 3229 task_lock(current); 3230 3231 if (new_fs) { 3232 fs = current->fs; 3233 spin_lock(&fs->lock); 3234 current->fs = new_fs; 3235 if (--fs->users) 3236 new_fs = NULL; 3237 else 3238 new_fs = fs; 3239 spin_unlock(&fs->lock); 3240 } 3241 3242 if (new_fd) 3243 swap(current->files, new_fd); 3244 3245 task_unlock(current); 3246 3247 if (new_cred) { 3248 /* Install the new user namespace */ 3249 commit_creds(new_cred); 3250 new_cred = NULL; 3251 } 3252 } 3253 3254 perf_event_namespaces(current); 3255 3256 bad_unshare_cleanup_cred: 3257 if (new_cred) 3258 put_cred(new_cred); 3259 bad_unshare_cleanup_fd: 3260 if (new_fd) 3261 put_files_struct(new_fd); 3262 3263 bad_unshare_cleanup_fs: 3264 if (new_fs) 3265 free_fs_struct(new_fs); 3266 3267 bad_unshare_out: 3268 return err; 3269 } 3270 3271 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) 3272 { 3273 return ksys_unshare(unshare_flags); 3274 } 3275 3276 /* 3277 * Helper to unshare the files of the current task. 3278 * We don't want to expose copy_files internals to 3279 * the exec layer of the kernel. 3280 */ 3281 3282 int unshare_files(void) 3283 { 3284 struct task_struct *task = current; 3285 struct files_struct *old, *copy = NULL; 3286 int error; 3287 3288 error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy); 3289 if (error || !copy) 3290 return error; 3291 3292 old = task->files; 3293 task_lock(task); 3294 task->files = copy; 3295 task_unlock(task); 3296 put_files_struct(old); 3297 return 0; 3298 } 3299 3300 int sysctl_max_threads(struct ctl_table *table, int write, 3301 void *buffer, size_t *lenp, loff_t *ppos) 3302 { 3303 struct ctl_table t; 3304 int ret; 3305 int threads = max_threads; 3306 int min = 1; 3307 int max = MAX_THREADS; 3308 3309 t = *table; 3310 t.data = &threads; 3311 t.extra1 = &min; 3312 t.extra2 = &max; 3313 3314 ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 3315 if (ret || !write) 3316 return ret; 3317 3318 max_threads = threads; 3319 3320 return 0; 3321 } 3322
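/*
 * Illustrative sketch (not part of the original file): the limit
 * maintained by sysctl_max_threads() above is exposed through procfs,
 * so from userspace it can be read and, with sufficient privilege,
 * written:
 *
 *	$ cat /proc/sys/kernel/threads-max
 *	# echo 100000 > /proc/sys/kernel/threads-max
 */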