/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <trace/sched.h>
#include <linux/magic.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

DEFINE_TRACE(sched_process_fork);

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

void free_task(struct task_struct *tsk)
{
	prop_local_destroy_single(&tsk->dirties);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	put_cred(tsk->real_cred);
	put_cred(tsk->cred);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

/*
 * macro override instead of weak attribute alias, to workaround
 * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
 */
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

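/*
 * Worked example of the sizing above (illustrative; the numbers assume a
 * hypothetical x86-like configuration with PAGE_SIZE = 4 KiB and
 * THREAD_SIZE = 8 KiB, i.e. two pages per kernel stack):
 *
 *	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE)
 *	            = mempages / 16
 *
 * so a machine with 1 GiB of memory (262144 pages) gets a default
 * max_threads of 16384, and RLIMIT_NPROC defaults to half of that.
 */
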
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;

	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;

	setup_thread_stack(tsk, orig);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}

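/*
 * The STACK_END_MAGIC word written above lands at the lowest usable word
 * of the kernel stack (on architectures where the stack grows down), so
 * any overflow into struct thread_info clobbers it. A minimal sketch of
 * the kind of debug check this enables (hypothetical helper, not part of
 * this file):
 *
 *	static inline int task_stack_overflowed(struct task_struct *tsk)
 *	{
 *		return *end_of_stack(tsk) != STACK_END_MAGIC;
 *	}
 */
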
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			spin_lock(&mapping->i_mmap_lock);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only.
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

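/*
 * Example (illustrative): because simple_strtoul() is called with base 0,
 * the boot parameter accepts decimal, octal, or "0x"-prefixed hex, e.g.
 *
 *	coredump_filter=0x23
 *
 * The parsed bits become the initial mm->flags dump filter whenever a new
 * mm is created while the creating task has no mm of its own (see
 * mm_init() below); otherwise the parent's flags are inherited, and a
 * running process can still change them via /proc/<pid>/coredump_filter.
 */
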
#include <linux/init_task.h>

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
	mm->core_state = NULL;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	spin_lock_init(&mm->ioctx_lock);
	INIT_HLIST_HEAD(&mm->ioctx_list);
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	mm_init_owner(mm, p);

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		mmu_notifier_mm_init(mm);
		return mm;
	}

	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm, current);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if the task has PF_KTHREAD set
 * (meaning this kernel thread has only transiently adopted a user mm via
 * use_mm(), e.g. to do AIO on its behalf). Otherwise returns the mm after
 * bumping its use count; the caller must release it via mmput() after
 * use. Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

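/*
 * Typical get_task_mm() usage, following the pattern the /proc handlers
 * use (a sketch):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		... walk mm->mmap ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 */
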
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * whether on error or on success - it doesn't matter.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one...
 *		Eric Biederman, 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
#endif

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested. We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble otherwise. Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid
	    && !(tsk->flags & PF_SIGNALED)
	    && atomic_read(&mm->mm_users) > 1) {
		u32 __user *tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}

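/*
 * The clear_child_tid/FUTEX_WAKE pair above is what makes joining a
 * thread cheap for userspace. In outline, a thread library can do
 * (a sketch of the NPTL-style protocol, not code from this kernel):
 *
 *	pid_t tid = *tidptr;		// stored via CLONE_CHILD_SETTID
 *	while (tid != 0) {		// kernel stores 0 on thread exit
 *		futex(tidptr, FUTEX_WAIT, tid, NULL);
 *		tid = *tidptr;
 *	}
 *
 * The kernel's put_user(0, tidptr) plus FUTEX_WAKE terminates the wait.
 */
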
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	/* Initialize the new mm's swap token state */
	mm->token_priority = 0;
	mm->last_interval = 0;

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context().
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	/* Initialize the swap token state */
	mm->token_priority = 0;
	mm->last_interval = 0;

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		write_lock(&fs->lock);
		if (fs->in_exec) {
			write_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		write_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

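/*
 * In userspace terms, the CLONE_VM branch of copy_mm() above is the
 * difference between these two calls (a sketch, assuming the glibc
 * clone(2) wrapper):
 *
 *	clone(fn, stack, CLONE_VM | SIGCHLD, arg);  // child shares this mm
 *	fork();                                     // child gets a dup_mm() copy
 *
 * With CLONE_VM the child's stores are immediately visible to the parent;
 * without it, copy_page_range() sets up copy-on-write pages so each
 * process keeps a private view.
 */
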
static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		tsk->io_context = ioc_task_link(ioc);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;

		tsk->io_context->ioprio = ioc->ioprio;
	}
#endif
	return 0;
}

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

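/*
 * The sharing semantics of copy_sighand(), seen from userspace
 * (illustrative): with CLONE_SIGHAND, a handler installed in either task
 * updates the one shared action table, e.g.
 *
 *	signal(SIGTERM, handler);	// visible to every CLONE_SIGHAND task
 *
 * whereas without the flag the child gets a memcpy()'d snapshot of the
 * parent's sig->action that it can change independently.
 */
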
void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}


/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	/* Thread group counters. */
	thread_group_cputime_init(sig);

	/* Expiration times and increments. */
	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	/* Cached expiration times. */
	sig->cputime_expires.prof_exp = cputime_zero;
	sig->cputime_expires.virt_exp = cputime_zero;
	sig->cputime_expires.sched_exp = 0;

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		sig->cputimer.running = 1;
	}

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}

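/*
 * Concretely (an illustrative example): if the forking shell ran
 * "ulimit -t 10", RLIMIT_CPU.rlim_cur is 10, so the new thread group
 * starts with prof_exp = secs_to_cputime(10) and an armed cputimer;
 * once the group has consumed 10 seconds of CPU time, the usual
 * RLIMIT_CPU behaviour delivers SIGXCPU.
 */
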
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		return 0;
	}

	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	if (clone_flags & CLONE_NEWPID)
		sig->flags |= SIGNAL_UNKILLABLE;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = NULL;
	sig->tty = NULL;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->gtime = cputime_zero;
	sig->cgtime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
	task_io_accounting_init(&sig->ioac);
	sig->sum_sched_runtime = 0;
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	acct_init_pacct(&sig->pacct);

	tty_audit_fork(sig);

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	thread_group_cputime_free(sig);
	tty_kref_put(sig->tty);
	kmem_cache_free(signal_cachep, sig);
}

static void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	atomic_dec(&sig->live);

	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);
}

static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~PF_SUPERPRIV;
	new_flags |= PF_FORKNOEXEC;
	new_flags |= PF_STARTING;
	p->flags = new_flags;
	clear_freeze_flag(p);
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
	mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */

/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = cputime_zero;
	tsk->cputime_expires.virt_exp = cputime_zero;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_flipctr_idx = 0;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0;	/* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL;	/* not blocked yet */
#endif
	if (unlikely(current->ptrace))
		ptrace_fork(p, clone_flags);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	ftrace_graph_init_task(p);

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_graph;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/*
	 * Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist.
	 */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_graph;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		list_add_tail(&p->sibling, &p->real_parent->children);
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			tty_kref_put(p->signal->tty);
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	return p;

bad_fork_free_graph:
	ftrace_graph_exit_task(p);
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	put_cred(p->real_cred);
	put_cred(p->cred);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

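/*
 * The flag sanity checks at the top of copy_process() are visible to
 * userspace as -EINVAL. For example (illustrative, in raw clone(2)
 * flag terms):
 *
 *	CLONE_THREAD alone                     fails: needs CLONE_SIGHAND
 *	CLONE_THREAD|CLONE_SIGHAND             fails: needs CLONE_VM
 *	CLONE_THREAD|CLONE_SIGHAND|CLONE_VM    is the valid combination
 *	                                       a thread library actually uses
 */
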
noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
			    &init_struct_pid, 0);
	if (!IS_ERR(task))
		init_idle(task, cpu);

	return task;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff.
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(trace, regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet. Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}

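/*
 * For reference, the classic syscalls are thin wrappers around do_fork().
 * On many architectures (a sketch; the exact register plumbing is
 * per-arch) they boil down to:
 *
 *	sys_fork:   do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 *	sys_vfork:  do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
 *			    regs->sp, regs, 0, NULL, NULL);
 *	sys_clone:  do_fork(clone_flags, newsp, regs, 0,
 *			    parent_tidptr, child_tidptr);
 */
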
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
	mmap_init();
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then must unshare the thread.
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
		return -EINVAL;
	else
		return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone. copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
		goto bad_unshare_out;

	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			write_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			write_unlock(&fs->lock);
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}
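
/*
 * From userspace, the flag forcing done by check_unshare_flags() means a
 * call like this (an illustrative sketch; creating a new mount namespace
 * requires CAP_SYS_ADMIN):
 *
 *	if (unshare(CLONE_NEWNS) == 0) {
 *		// this process now has a private mount namespace;
 *		// CLONE_FS was implied, so its root/cwd state was
 *		// unshared too, and later mount() calls no longer
 *		// affect the original namespace.
 *	}
 */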