/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock):
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */
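/*
 * max_threads is also writable at run time via /proc/sys/kernel/threads-max
 * (see kernel/sysctl.c); fork_init() below computes its boot-time default.
 */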
int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);	/* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct_node(node)		\
		kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
# define free_task_struct(tsk)			\
		kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

static void account_kernel_stack(struct thread_info *ti, int account)
{
	struct zone *zone = page_zone(virt_to_page(ti));

	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}

void free_task(struct task_struct *tsk)
{
	prop_local_destroy_single(&tsk->dirties);
	account_kernel_stack(tsk->stack, -1);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);
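/*
 * Note: profile_handoff_task() returning nonzero means a profiling module
 * has claimed the task_struct via the task handoff notifier chain, so
 * free_task() is skipped here and becomes that notifier's responsibility.
 */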
/*
 * macro override instead of weak attribute alias, to workaround
 * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
 */
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}

#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
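	/*
	 * Duplicate each of the parent's VMAs, in order: VM_DONTCOPY
	 * mappings are dropped from the child, VM_ACCOUNT mappings are
	 * charged against the commit limit first, and file-backed
	 * mappings get linked into the file's i_mmap prio tree so that
	 * rmap can find the new vma.
	 */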
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			spin_lock(&mapping->i_mmap_lock);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only.
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);
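		/*
		 * The page tables themselves are duplicated below by
		 * copy_page_range() (see mm/memory.c); for private
		 * writable mappings it marks the PTEs read-only in both
		 * mms, so the first write on either side faults and does
		 * the copy-on-write.
		 */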
		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(pol);
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	INIT_HLIST_HEAD(&mm->ioctx_list);
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ?
		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
	mm->core_state = NULL;
	mm->nr_ptes = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	atomic_set(&mm->oom_disable_count, 0);

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		mmu_notifier_mm_init(mm);
		return mm;
	}

	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm, current);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(mm->pmd_huge_pte);
#endif
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
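/*
 * Note on the two reference counts initialized in mm_init(): mm_users
 * counts users of the address space (threads, get_task_mm() callers),
 * while mm_count counts references to the mm_struct itself, including
 * one held collectively by all mm_users plus the "lazy TLB" references
 * taken by kernel threads. mmput() below drops an mm_users reference;
 * __mmdrop() above runs only when mm_count finally reaches zero.
 */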
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		ksm_exit(mm);
		khugepaged_exit(mm); /* must run before exit_mmap */
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		if (mm->binfmt)
			module_put(mm->binfmt->module);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if the task is a kernel
 * thread (PF_KTHREAD set, meaning it has at most transiently adopted a
 * user mm with use_mm(), e.g. to do AIO). Otherwise returns the mm
 * after bumping its use count; the caller must release it with mmput()
 * after use. Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error or success, whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested. We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble otherwise. Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->flags & PF_SIGNALED) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}
}
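/*
 * The clear-tid/futex-wake sequence above is the kernel half of
 * CLONE_CHILD_CLEARTID: a threading library (glibc's pthread_join(),
 * for instance) can FUTEX_WAIT on the child's tid word and is woken
 * here once the child has really let go of its mm.
 */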
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	mm->pmd_huge_pte = NULL;
#endif

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got the module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context().
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;
	if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
		atomic_inc(&mm->oom_disable_count);

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}
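/*
 * The fs->in_exec test above closes a race with exec: while another
 * thread of the parent is in the middle of exec(), a new CLONE_FS
 * sharer must not appear, since exec has already decided whether the
 * fs_struct is shared (see check_unsafe_exec() in fs/exec.c).
 */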
static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		tsk->io_context = ioc_task_link(ioc);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;

		tsk->io_context->ioprio = ioc->ioprio;
	}
#endif
	return 0;
}

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}


/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	unsigned long cpu_limit;

	/* Thread group counters. */
	thread_group_cputime_init(sig);

	cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (cpu_limit != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
		sig->cputimer.running = 1;
	}

	/* The timer lists. */
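	/* One list per clock: CPUCLOCK_PROF, CPUCLOCK_VIRT, CPUCLOCK_SCHED. */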
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	if (clone_flags & CLONE_NEWPID)
		sig->flags |= SIGNAL_UNKILLABLE;
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

	sig->oom_adj = current->signal->oom_adj;
	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}

static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
	new_flags |= PF_FORKNOEXEC;
	new_flags |= PF_STARTING;
	p->flags = new_flags;
	clear_freeze_flag(p);
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
	mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */

/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = cputime_zero;
	tsk->cputime_expires.virt_exp = cputime_zero;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);
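	/*
	 * (The check above: a new mount namespace combined with a shared
	 * fs_struct makes no sense, since a chroot()/chdir() on one side
	 * would bleed across namespaces.)
	 */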
	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0;	/* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL;	/* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
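	/*
	 * exit_signal is -1 for CLONE_THREAD children (no signal is sent
	 * to the parent when a non-leader thread exits); otherwise the
	 * low CSIGNAL bits of clone_flags name the signal, usually SIGCHLD.
	 */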
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/*
	 * Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist.
	 */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid))
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	perf_event_fork(p);
	return p;
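	/*
	 * Error unwinding: each bad_fork_* label below undoes what was set
	 * up after the point that jumps to it, releasing resources in
	 * reverse order of acquisition and finally returning retval as an
	 * ERR_PTR.
	 */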
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		task_lock(p);
		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
			atomic_dec(&p->mm->oom_disable_count);
		task_unlock(p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

static inline void init_idle_pids(struct pid_link *links)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&links[type].node); /* not really needed */
		links[type].pid = &init_struct_pid;
	}
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
			    &init_struct_pid, 0);
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu);
	}

	return task;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	     unsigned long stack_start,
	     struct pt_regs *regs,
	     unsigned long stack_size,
	     int __user *parent_tidptr,
	     int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(regs, clone_flags, nr, p);
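		/*
		 * For CLONE_VFORK the parent blocks on the completion
		 * below until the child execs or exits; mm_release() on
		 * the child side is what completes vfork_done.
		 */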
		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet. Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		wake_up_new_task(p, clone_flags);

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
			SLAB_NOTRACK, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
	mmap_init();
}

/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing to
	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
	 * needs to unshare vm.
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		/* FIXME: get_task_mm() increments ->mm_users */
		if (atomic_read(&current->mm->mm_users) > 1)
			return -EINVAL;
	}

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_out;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					      new_fs)))
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}