/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;      /* Handle normal Linux uptimes. */
int nr_threads;                 /* The idle threads do not count.. */

int max_threads;                /* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

int nr_processes(void)
{
        int cpu;
        int total = 0;

        for_each_online_cpu(cpu)
                total += per_cpu(process_counts, cpu);

        return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()    kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)  kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

void free_task(struct task_struct *tsk)
{
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
        free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
        WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);

        security_task_free(tsk);
        free_uid(tsk->user);
        put_group_info(tsk->group_info);
        delayacct_tsk_free(tsk);

        if (!profile_handoff_task(tsk))
                free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
#endif
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
                kmem_cache_create("task_struct", sizeof(struct task_struct),
                        ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

        /*
         * The default maximum number of threads is set to a safe
         * value: with this formula the thread stacks can take up
         * at most one eighth of memory.
         */
        max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

        /*
         * we need to allow at least 20 threads to boot a system
         */
        if (max_threads < 20)
                max_threads = 20;

        init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
        init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
}
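
/*
 * Worked example (illustrative; assumes i386-style values of
 * PAGE_SIZE = 4 KiB and THREAD_SIZE = 8 KiB): with 1 GiB of RAM,
 * mempages = 262144, so
 *
 *        max_threads = 262144 / (8 * 8192 / 4096) = 16384
 *
 * and 16384 stacks * 8 KiB = 128 MiB, one eighth of memory.
 */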

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
        struct task_struct *tsk;
        struct thread_info *ti;

        prepare_to_copy(orig);

        tsk = alloc_task_struct();
        if (!tsk)
                return NULL;

        ti = alloc_thread_info(tsk);
        if (!ti) {
                free_task_struct(tsk);
                return NULL;
        }

        *tsk = *orig;
        tsk->stack = ti;
        setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
        tsk->stack_canary = get_random_int();
#endif

        /* One for us, one for whoever does the "release_task()" (usually parent) */
        atomic_set(&tsk->usage, 2);
        atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
        tsk->btrace_seq = 0;
#endif
        tsk->splice_pipe = NULL;
        return tsk;
}

#ifdef CONFIG_MMU
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
        struct vm_area_struct *mpnt, *tmp, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
        struct mempolicy *pol;

        down_write(&oldmm->mmap_sem);
        flush_cache_dup_mm(oldmm);
        /*
         * Not linked in yet - no deadlock potential:
         */
        down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

        mm->locked_vm = 0;
        mm->mmap = NULL;
        mm->mmap_cache = NULL;
        mm->free_area_cache = oldmm->mmap_base;
        mm->cached_hole_size = ~0UL;
        mm->map_count = 0;
        cpus_clear(mm->cpu_vm_mask);
        mm->mm_rb = RB_ROOT;
        rb_link = &mm->mm_rb.rb_node;
        rb_parent = NULL;
        pprev = &mm->mmap;

        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;

                if (mpnt->vm_flags & VM_DONTCOPY) {
                        long pages = vma_pages(mpnt);
                        mm->total_vm -= pages;
                        vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
                                                                -pages);
                        continue;
                }
                charge = 0;
                if (mpnt->vm_flags & VM_ACCOUNT) {
                        unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
                        if (security_vm_enough_memory(len))
                                goto fail_nomem;
                        charge = len;
                }
                tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
                pol = mpol_copy(vma_policy(mpnt));
                retval = PTR_ERR(pol);
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
                vma_set_policy(tmp, pol);
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
                tmp->vm_next = NULL;
                anon_vma_link(tmp);
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
                        get_file(file);
                        if (tmp->vm_flags & VM_DENYWRITE)
                                atomic_dec(&inode->i_writecount);

                        /* insert tmp into the share list, just after mpnt */
                        spin_lock(&file->f_mapping->i_mmap_lock);
                        tmp->vm_truncate_count = mpnt->vm_truncate_count;
                        flush_dcache_mmap_lock(file->f_mapping);
                        vma_prio_tree_add(tmp, mpnt);
                        flush_dcache_mmap_unlock(file->f_mapping);
                        spin_unlock(&file->f_mapping->i_mmap_lock);
                }

                /*
                 * Link in the new vma and copy the page table entries.
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;

                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
                rb_parent = &tmp->vm_rb;

                mm->map_count++;
                retval = copy_page_range(mm, oldmm, mpnt);

                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);

                if (retval)
                        goto out;
        }
        /* a new mm has just been created */
        arch_dup_mmap(oldmm, mm);
        retval = 0;
out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
        up_write(&oldmm->mmap_sem);
        return retval;
fail_nomem_policy:
        kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
        retval = -ENOMEM;
        vm_unacct_memory(charge);
        goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
                return -ENOMEM;
        return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
        pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)     (0)
#define mm_alloc_pgd(mm)        (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
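
/*
 * Illustrative sketch (userspace view; x is a hypothetical variable):
 * dup_mmap() duplicates only the vma list and page tables, and
 * copy_page_range() write-protects anonymous pages for copy-on-write,
 * so parent and child diverge lazily, one page at a time:
 *
 *        int x = 1;
 *        if (fork() == 0) {
 *                x = 2;            the child copies the page here
 *                _exit(0);
 *        }
 *        assert(x == 1);           the parent's page is untouched
 */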

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct *mm_init(struct mm_struct *mm)
{
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->core_waiters = 0;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
        spin_lock_init(&mm->page_table_lock);
        rwlock_init(&mm->ioctx_list_lock);
        mm->ioctx_list = NULL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;

        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
                return mm;
        }
        free_mm(mm);
        return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
        struct mm_struct *mm;

        mm = allocate_mm();
        if (mm) {
                memset(mm, 0, sizeof(*mm));
                mm = mm_init(mm);
        }
        return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
        BUG_ON(mm == &init_mm);
        mm_free_pgd(mm);
        destroy_context(mm);
        free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
        might_sleep();

        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
                exit_mmap(mm);
                if (!list_empty(&mm->mmlist)) {
                        spin_lock(&mmlist_lock);
                        list_del(&mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                put_swap_token(mm);
                mmdrop(mm);
        }
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 * @task: the task whose mm to acquire
 *
 * Returns %NULL if the task has no mm, or if the task has
 * PF_BORROWED_MM set (meaning this kernel workthread has transiently
 * adopted a user mm with use_mm to do its AIO). Otherwise returns a
 * reference to the mm, after bumping up the use count. The caller
 * must release the mm via mmput() after use. Typically used by
 * /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
        struct mm_struct *mm;

        task_lock(task);
        mm = task->mm;
        if (mm) {
                if (task->flags & PF_BORROWED_MM)
                        mm = NULL;
                else
                        atomic_inc(&mm->mm_users);
        }
        task_unlock(task);
        return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
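
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * every successful get_task_mm() must be paired with mmput(), e.g.
 * for a /proc-style reader walking another task's vma list:
 *
 *        struct mm_struct *mm = get_task_mm(task);
 *        if (mm) {
 *                down_read(&mm->mmap_sem);
 *                for (vma = mm->mmap; vma; vma = vma->vm_next)
 *                        ...;
 *                up_read(&mm->mmap_sem);
 *                mmput(mm);
 *        }
 */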

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error or success, whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one, because we mmput the new mm_struct before
 * restoring the old one.
 *              Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
        struct completion *vfork_done = tsk->vfork_done;

        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);

        /* notify parent sleeping on vfork() */
        if (vfork_done) {
                tsk->vfork_done = NULL;
                complete(vfork_done);
        }

        /*
         * If we're exiting normally, clear a user-space tid field if
         * requested. We leave this alone when dying by signal, to leave
         * the value intact in a core dump, and to save the unnecessary
         * trouble otherwise. Userland only wants this done for a sys_exit.
         */
        if (tsk->clear_child_tid
            && !(tsk->flags & PF_SIGNALED)
            && atomic_read(&mm->mm_users) > 1) {
                u32 __user *tidptr = tsk->clear_child_tid;
                tsk->clear_child_tid = NULL;

                /*
                 * We don't check the error code - if userspace has
                 * not set up a proper pointer then tough luck.
                 */
                put_user(0, tidptr);
                sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
        }
}
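
/*
 * Illustrative sketch (userspace view, not from this file): because
 * mm_release() is what completes ->vfork_done, a vfork() parent stays
 * blocked until the child gives the borrowed mm back via execve() or
 * _exit(), which fixes the ordering below:
 *
 *        if (vfork() == 0)
 *                _exit(0);         the child always runs first
 *        puts("parent");           the parent resumes only afterwards
 */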

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
        struct mm_struct *mm, *oldmm = current->mm;
        int err;

        if (!oldmm)
                return NULL;

        mm = allocate_mm();
        if (!mm)
                goto fail_nomem;

        memcpy(mm, oldmm, sizeof(*mm));

        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;

        if (!mm_init(mm))
                goto fail_nomem;

        if (init_new_context(tsk, mm))
                goto fail_nocontext;

        err = dup_mmap(mm, oldmm);
        if (err)
                goto free_pt;

        mm->hiwater_rss = get_mm_rss(mm);
        mm->hiwater_vm = mm->total_vm;

        return mm;

free_pt:
        mmput(mm);

fail_nomem:
        return NULL;

fail_nocontext:
        /*
         * If init_new_context() failed, we cannot use mmput() to free the mm
         * because it calls destroy_context()
         */
        mm_free_pgd(mm);
        free_mm(mm);
        return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
        struct mm_struct *mm, *oldmm;
        int retval;

        tsk->min_flt = tsk->maj_flt = 0;
        tsk->nvcsw = tsk->nivcsw = 0;

        tsk->mm = NULL;
        tsk->active_mm = NULL;

        /*
         * Are we cloning a kernel thread?
         *
         * We need to steal an active VM for that..
         */
        oldmm = current->mm;
        if (!oldmm)
                return 0;

        if (clone_flags & CLONE_VM) {
                atomic_inc(&oldmm->mm_users);
                mm = oldmm;
                goto good_mm;
        }

        retval = -ENOMEM;
        mm = dup_mm(tsk);
        if (!mm)
                goto fail_nomem;

good_mm:
        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;

        tsk->mm = mm;
        tsk->active_mm = mm;
        return 0;

fail_nomem:
        return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* We don't need to lock fs - think why ;-) */
        if (fs) {
                atomic_set(&fs->count, 1);
                rwlock_init(&fs->lock);
                fs->umask = old->umask;
                read_lock(&old->lock);
                fs->rootmnt = mntget(old->rootmnt);
                fs->root = dget(old->root);
                fs->pwdmnt = mntget(old->pwdmnt);
                fs->pwd = dget(old->pwd);
                if (old->altroot) {
                        fs->altrootmnt = mntget(old->altrootmnt);
                        fs->altroot = dget(old->altroot);
                } else {
                        fs->altrootmnt = NULL;
                        fs->altroot = NULL;
                }
                read_unlock(&old->lock);
        }
        return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
        return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
        if (clone_flags & CLONE_FS) {
                atomic_inc(&current->fs->count);
                return 0;
        }
        tsk->fs = __copy_fs_struct(current->fs);
        if (!tsk->fs)
                return -ENOMEM;
        return 0;
}

static int count_open_files(struct fdtable *fdt)
{
        int size = fdt->max_fds;
        int i;

        /* Find the last open fd */
        for (i = size / (8 * sizeof(long)); i > 0; ) {
                if (fdt->open_fds->fds_bits[--i])
                        break;
        }
        i = (i + 1) * 8 * sizeof(long);
        return i;
}
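
/*
 * Worked example (assumes 64-bit longs): with max_fds = 256 the open-fds
 * bitmap spans 256 / 64 = 4 words. If the highest open fd is 70, word 1
 * (fds 64..127) is the last non-zero word, so count_open_files() returns
 * (1 + 1) * 64 = 128: the result is rounded up to a whole bitmap word,
 * not to the exact highest fd.
 */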

static struct files_struct *alloc_files(void)
{
        struct files_struct *newf;
        struct fdtable *fdt;

        newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->next_fd = 0;
        fdt = &newf->fdtab;
        fdt->max_fds = NR_OPEN_DEFAULT;
        fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
        fdt->open_fds = (fd_set *)&newf->open_fds_init;
        fdt->fd = &newf->fd_array[0];
        INIT_RCU_HEAD(&fdt->rcu);
        fdt->next = NULL;
        rcu_assign_pointer(newf->fdt, fdt);
out:
        return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        int open_files, size, i;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = alloc_files();
        if (!newf)
                goto out;

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        new_fdt = files_fdtable(newf);
        open_files = count_open_files(old_fdt);

        /*
         * Check whether we need to allocate a larger fd array and fd set.
         * Note: we're not a clone task, so the open count won't change.
         */
        if (open_files > new_fdt->max_fds) {
                new_fdt->max_fds = 0;
                spin_unlock(&oldf->file_lock);
                spin_lock(&newf->file_lock);
                *errorp = expand_files(newf, open_files - 1);
                spin_unlock(&newf->file_lock);
                if (*errorp < 0)
                        goto out_release;
                new_fdt = files_fdtable(newf);
                /*
                 * Reacquire the oldf lock and a pointer to its fd table:
                 * it may have grown a new, bigger fd table in the
                 * meantime, and we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
        }

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        memcpy(new_fdt->open_fds->fds_bits,
                old_fdt->open_fds->fds_bits, open_files / 8);
        memcpy(new_fdt->close_on_exec->fds_bits,
                old_fdt->close_on_exec->fds_bits, open_files / 8);

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open(). So make sure that this
                         * fd is available to the new process.
                         */
                        FD_CLR(open_files - i, new_fdt->open_fds);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* compute the remainder to be cleared */
        size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

        /* This is long-word aligned, so an optimized version could be used */
        memset(new_fds, 0, size);

        if (new_fdt->max_fds > open_files) {
                int left = (new_fdt->max_fds - open_files) / 8;
                int start = open_files / (8 * sizeof(unsigned long));

                memset(&new_fdt->open_fds->fds_bits[start], 0, left);
                memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
        }

        return newf;

out_release:
        kmem_cache_free(files_cachep, newf);
out:
        return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
        struct files_struct *oldf, *newf;
        int error = 0;

        /*
         * A background process may not have any files ...
         */
        oldf = current->files;
        if (!oldf)
                goto out;

        if (clone_flags & CLONE_FILES) {
                atomic_inc(&oldf->count);
                goto out;
        }

        /*
         * Note: we may be using current for both targets (See exec.c)
         * This works because we cache current->files (old) as oldf. Don't
         * break this.
         */
        tsk->files = NULL;
        newf = dup_fd(oldf, &error);
        if (!newf)
                goto out;

        tsk->files = newf;
        error = 0;
out:
        return error;
}
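
/*
 * Illustrative sketch (userspace view; fd and buf are hypothetical):
 * dup_fd() gives a fork()ed child its own fd table, but each copied
 * descriptor still points at the same open file description, so the
 * file offset stays shared between parent and child:
 *
 *        parent:  lseek(fd, 100, SEEK_SET);
 *        child:   read(fd, buf, 10);          reads bytes 100..109
 *        parent:  lseek(fd, 0, SEEK_CUR);     now returns 110
 */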

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */

int unshare_files(void)
{
        struct files_struct *files = current->files;
        int rc;

        BUG_ON(!files);

        /*
         * This can race, but the race merely causes us to copy when
         * we didn't need to, and then drop the copy.
         */
        if (atomic_read(&files->count) == 1) {
                atomic_inc(&files->count);
                return 0;
        }
        rc = copy_files(0, current);
        if (rc)
                current->files = files;
        return rc;
}

EXPORT_SYMBOL(unshare_files);

static inline int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
        struct sighand_struct *sig;

        if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
                atomic_inc(&current->sighand->count);
                return 0;
        }
        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
        rcu_assign_pointer(tsk->sighand, sig);
        if (!sig)
                return -ENOMEM;
        atomic_set(&sig->count, 1);
        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
        return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
        if (atomic_dec_and_test(&sighand->count))
                kmem_cache_free(sighand_cachep, sighand);
}
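
/*
 * Illustrative sketch (userspace view): because CLONE_SIGHAND (and
 * therefore CLONE_THREAD) shares one sighand_struct rather than
 * copying sig->action, a handler installed by any thread takes
 * effect for the whole group at once:
 *
 *        thread A:  signal(SIGUSR1, handler);
 *        thread B:  raise(SIGUSR1);           runs handler, too
 */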

static inline int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
        struct signal_struct *sig;
        int ret;

        if (clone_flags & CLONE_THREAD) {
                atomic_inc(&current->signal->count);
                atomic_inc(&current->signal->live);
                return 0;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
        tsk->signal = sig;
        if (!sig)
                return -ENOMEM;

        ret = copy_thread_group_keys(tsk);
        if (ret < 0) {
                kmem_cache_free(signal_cachep, sig);
                return ret;
        }

        atomic_set(&sig->count, 1);
        atomic_set(&sig->live, 1);
        init_waitqueue_head(&sig->wait_chldexit);
        sig->flags = 0;
        sig->group_exit_code = 0;
        sig->group_exit_task = NULL;
        sig->group_stop_count = 0;
        sig->curr_target = NULL;
        init_sigpending(&sig->shared_pending);
        INIT_LIST_HEAD(&sig->posix_timers);

        hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sig->it_real_incr.tv64 = 0;
        sig->real_timer.function = it_real_fn;
        sig->tsk = tsk;

        sig->it_virt_expires = cputime_zero;
        sig->it_virt_incr = cputime_zero;
        sig->it_prof_expires = cputime_zero;
        sig->it_prof_incr = cputime_zero;

        sig->leader = 0;        /* session leadership doesn't inherit */
        sig->tty_old_pgrp = NULL;

        sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
        sig->sum_sched_runtime = 0;
        INIT_LIST_HEAD(&sig->cpu_timers[0]);
        INIT_LIST_HEAD(&sig->cpu_timers[1]);
        INIT_LIST_HEAD(&sig->cpu_timers[2]);
        taskstats_tgid_init(sig);

        task_lock(current->group_leader);
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
        task_unlock(current->group_leader);

        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                /*
                 * New sole thread in the process gets an expiry time
                 * of the whole CPU time limit.
                 */
                tsk->it_prof_expires =
                        secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
        }
        acct_init_pacct(&sig->pacct);

        return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
        exit_thread_group_keys(sig);
        kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;

        atomic_dec(&sig->live);

        if (atomic_dec_and_test(&sig->count))
                __cleanup_signal(sig);
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
        unsigned long new_flags = p->flags;

        new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
        new_flags |= PF_FORKNOEXEC;
        if (!(clone_flags & CLONE_PTRACE))
                p->ptrace = 0;
        p->flags = new_flags;
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
        current->clear_child_tid = tidptr;

        return current->pid;
}
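
/*
 * Illustrative sketch (roughly what a thread library such as NPTL
 * does; tid and t are hypothetical): CLONE_CHILD_CLEARTID (or
 * set_tid_address()) combined with a futex wait is the mechanism
 * behind pthread_join(): the zeroing and FUTEX_WAKE happen in
 * mm_release() when the thread exits.
 *
 *        pid_t t, tid = clone(..., CLONE_CHILD_CLEARTID, ..., &tid);
 *        while ((t = tid) != 0)
 *                syscall(SYS_futex, &tid, FUTEX_WAIT, t, NULL);
 */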

static inline void rt_mutex_init_task(struct task_struct *p)
{
        spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
        plist_head_init(&p->pi_waiters, &p->pi_lock);
        p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
                                        unsigned long stack_start,
                                        struct pt_regs *regs,
                                        unsigned long stack_size,
                                        int __user *parent_tidptr,
                                        int __user *child_tidptr,
                                        struct pid *pid)
{
        int retval;
        struct task_struct *p = NULL;

        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
                return ERR_PTR(-EINVAL);

        /*
         * Thread groups must share signals as well, and detached threads
         * can only be started up within the thread group.
         */
        if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
                return ERR_PTR(-EINVAL);

        /*
         * Shared signal handlers imply shared VM. By way of the above,
         * thread groups also imply shared VM. Blocking this case allows
         * for various simplifications in other code.
         */
        if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
                return ERR_PTR(-EINVAL);

        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;

        retval = -ENOMEM;
        p = dup_task_struct(current);
        if (!p)
                goto fork_out;

        rt_mutex_init_task(p);

#ifdef CONFIG_TRACE_IRQFLAGS
        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
        retval = -EAGAIN;
        if (atomic_read(&p->user->processes) >=
                        p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                                p->user != &root_user)
                        goto bad_fork_free;
        }

        atomic_inc(&p->user->__count);
        atomic_inc(&p->user->processes);
        get_group_info(p->group_info);

        /*
         * If multiple threads are within copy_process(), then this check
         * triggers too late. This doesn't hurt, the check is only there
         * to stop root fork bombs.
         */
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;

        if (!try_module_get(task_thread_info(p)->exec_domain->module))
                goto bad_fork_cleanup_count;

        if (p->binfmt && !try_module_get(p->binfmt->module))
                goto bad_fork_cleanup_put_domain;

        p->did_exec = 0;
        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
        copy_flags(clone_flags, p);
        p->pid = pid_nr(pid);
        retval = -EFAULT;
        if (clone_flags & CLONE_PARENT_SETTID)
                if (put_user(p->pid, parent_tidptr))
                        goto bad_fork_cleanup_delays_binfmt;

        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);

        clear_tsk_thread_flag(p, TIF_SIGPENDING);
        init_sigpending(&p->pending);

        p->utime = cputime_zero;
        p->stime = cputime_zero;

#ifdef CONFIG_TASK_XACCT
        p->rchar = 0;           /* I/O counter: bytes read */
        p->wchar = 0;           /* I/O counter: bytes written */
        p->syscr = 0;           /* I/O counter: read syscalls */
        p->syscw = 0;           /* I/O counter: write syscalls */
#endif
        task_io_accounting_init(p);
        acct_clear_integrals(p);

        p->it_virt_expires = cputime_zero;
        p->it_prof_expires = cputime_zero;
        p->it_sched_expires = 0;
        INIT_LIST_HEAD(&p->cpu_timers[0]);
        INIT_LIST_HEAD(&p->cpu_timers[1]);
        INIT_LIST_HEAD(&p->cpu_timers[2]);

        p->lock_depth = -1;     /* -1 = no lock */
        do_posix_clock_monotonic_gettime(&p->start_time);
        p->security = NULL;
        p->io_context = NULL;
        p->io_wait = NULL;
        p->audit_context = NULL;
        cpuset_fork(p);
#ifdef CONFIG_NUMA
        p->mempolicy = mpol_copy(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
                goto bad_fork_cleanup_cpuset;
        }
        mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        p->hardirqs_enabled = 1;
#else
        p->hardirqs_enabled = 0;
#endif
        p->hardirq_enable_ip = 0;
        p->hardirq_enable_event = 0;
        p->hardirq_disable_ip = _THIS_IP_;
        p->hardirq_disable_event = 0;
        p->softirqs_enabled = 1;
        p->softirq_enable_ip = _THIS_IP_;
        p->softirq_enable_event = 0;
        p->softirq_disable_ip = 0;
        p->softirq_disable_event = 0;
        p->hardirq_context = 0;
        p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
        p->lockdep_depth = 0;   /* no locks held yet */
        p->curr_chain_key = 0;
        p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL;   /* not blocked yet */
#endif

        p->tgid = p->pid;
        if (clone_flags & CLONE_THREAD)
                p->tgid = current->tgid;

        if ((retval = security_task_alloc(p)))
                goto bad_fork_cleanup_policy;
        if ((retval = audit_alloc(p)))
                goto bad_fork_cleanup_security;
        /* copy all the process information */
        if ((retval = copy_semundo(clone_flags, p)))
                goto bad_fork_cleanup_audit;
        if ((retval = copy_files(clone_flags, p)))
                goto bad_fork_cleanup_semundo;
        if ((retval = copy_fs(clone_flags, p)))
                goto bad_fork_cleanup_files;
        if ((retval = copy_sighand(clone_flags, p)))
                goto bad_fork_cleanup_fs;
        if ((retval = copy_signal(clone_flags, p)))
                goto bad_fork_cleanup_sighand;
        if ((retval = copy_mm(clone_flags, p)))
                goto bad_fork_cleanup_signal;
        if ((retval = copy_keys(clone_flags, p)))
                goto bad_fork_cleanup_mm;
        if ((retval = copy_namespaces(clone_flags, p)))
                goto bad_fork_cleanup_keys;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        if (retval)
                goto bad_fork_cleanup_namespaces;

        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
        p->robust_list = NULL;
#ifdef CONFIG_COMPAT
        p->compat_robust_list = NULL;
#endif
        INIT_LIST_HEAD(&p->pi_state_list);
        p->pi_state_cache = NULL;

        /*
         * sigaltstack should be cleared when sharing the same VM
         */
        if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
                p->sas_ss_sp = p->sas_ss_size = 0;

        /*
         * Syscall tracing should be turned off in the child regardless
         * of CLONE_PTRACE.
         */
        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

        /*
         * Our parent's execution domain becomes the current domain.
         * These must match for thread signalling to apply.
         */
        p->parent_exec_id = p->self_exec_id;

        /* ok, now we should be set up.. */
        p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
        p->pdeath_signal = 0;
        p->exit_state = 0;

        /*
         * Ok, make it visible to the rest of the system.
         * We don't wake it up yet.
         */
        p->group_leader = p;
        INIT_LIST_HEAD(&p->thread_group);
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);

        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);

        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);

        /* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
        p->ioprio = current->ioprio;

        /*
         * The task hasn't been attached yet, so its cpus_allowed mask will
         * not be changed, nor will its assigned CPU.
         *
         * The cpus_allowed mask of the parent may have changed after it was
         * copied first time - so re-copy it here, then check the child's CPU
         * to ensure it is on a valid CPU (and if not, just force it back to
         * parent's CPU). This avoids a lot of nasty races.
         */
        p->cpus_allowed = current->cpus_allowed;
        if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
                        !cpu_online(task_cpu(p))))
                set_task_cpu(p, smp_processor_id());

        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
                p->real_parent = current->real_parent;
        else
                p->real_parent = current;
        p->parent = p->real_parent;

        spin_lock(&current->sighand->siglock);

        /*
         * Process group and session signals need to be delivered to just the
         * parent before the fork or both the parent and the child after the
         * fork. Restart if a signal comes in before we add the new process to
         * its process group.
         * A fatal signal pending means that current will exit, so the new
         * thread can't slip out of an OOM kill (or normal SIGKILL).
         */
        recalc_sigpending();
        if (signal_pending(current)) {
                spin_unlock(&current->sighand->siglock);
                write_unlock_irq(&tasklist_lock);
                retval = -ERESTARTNOINTR;
                goto bad_fork_cleanup_namespaces;
        }

        if (clone_flags & CLONE_THREAD) {
                p->group_leader = current->group_leader;
                list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

                if (!cputime_eq(current->signal->it_virt_expires,
                                cputime_zero) ||
                    !cputime_eq(current->signal->it_prof_expires,
                                cputime_zero) ||
                    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
                    !list_empty(&current->signal->cpu_timers[0]) ||
                    !list_empty(&current->signal->cpu_timers[1]) ||
                    !list_empty(&current->signal->cpu_timers[2])) {
                        /*
                         * Have child wake up on its first tick to check
                         * for process CPU timers.
                         */
                        p->it_prof_expires = jiffies_to_cputime(1);
                }
        }

        if (likely(p->pid)) {
                add_parent(p);
                if (unlikely(p->ptrace & PT_PTRACED))
                        __ptrace_link(p, current->parent);

                if (thread_group_leader(p)) {
                        p->signal->tty = current->signal->tty;
                        p->signal->pgrp = process_group(current);
                        set_signal_session(p->signal, process_session(current));
                        attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
                        attach_pid(p, PIDTYPE_SID, task_session(current));

                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __get_cpu_var(process_counts)++;
                }
                attach_pid(p, PIDTYPE_PID, pid);
                nr_threads++;
        }

        total_forks++;
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        return p;

bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
bad_fork_cleanup_keys:
        exit_keys(p);
bad_fork_cleanup_mm:
        if (p->mm)
                mmput(p->mm);
bad_fork_cleanup_signal:
        cleanup_signal(p);
bad_fork_cleanup_sighand:
        __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
        exit_fs(p);     /* blocking */
bad_fork_cleanup_files:
        exit_files(p);  /* blocking */
bad_fork_cleanup_semundo:
        exit_sem(p);
bad_fork_cleanup_audit:
        audit_free(p);
bad_fork_cleanup_security:
        security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
        mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
        cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
        delayacct_tsk_free(p);
        if (p->binfmt)
                module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
        module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
        put_group_info(p->group_info);
        atomic_dec(&p->user->processes);
        free_uid(p->user);
bad_fork_free:
        free_task(p);
fork_out:
        return ERR_PTR(retval);
}
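
/*
 * Illustrative sketch (alloc_a/alloc_b/free_a are hypothetical): the
 * bad_fork_* labels above are the kernel's usual goto-based unwind
 * idiom; each label undoes exactly what was set up before the jump,
 * in reverse order of allocation:
 *
 *        a = alloc_a();
 *        if (!a)
 *                goto out;
 *        b = alloc_b();
 *        if (!b)
 *                goto undo_a;
 *        return 0;
 * undo_a:
 *        free_a(a);
 * out:
 *        return -ENOMEM;
 */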

noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
        struct task_struct *task;
        struct pt_regs regs;

        task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL,
                                &init_struct_pid);
        if (!IS_ERR(task))
                init_idle(task, cpu);

        return task;
}

static inline int fork_traceflag(unsigned clone_flags)
{
        if (clone_flags & CLONE_UNTRACED)
                return 0;
        else if (clone_flags & CLONE_VFORK) {
                if (current->ptrace & PT_TRACE_VFORK)
                        return PTRACE_EVENT_VFORK;
        } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
                if (current->ptrace & PT_TRACE_CLONE)
                        return PTRACE_EVENT_CLONE;
        } else if (current->ptrace & PT_TRACE_FORK)
                return PTRACE_EVENT_FORK;

        return 0;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
              unsigned long stack_start,
              struct pt_regs *regs,
              unsigned long stack_size,
              int __user *parent_tidptr,
              int __user *child_tidptr)
{
        struct task_struct *p;
        int trace = 0;
        struct pid *pid = alloc_pid();
        long nr;

        if (!pid)
                return -EAGAIN;
        nr = pid->nr;
        if (unlikely(current->ptrace)) {
                trace = fork_traceflag(clone_flags);
                if (trace)
                        clone_flags |= CLONE_PTRACE;
        }

        p = copy_process(clone_flags, stack_start, regs, stack_size,
                        parent_tidptr, child_tidptr, pid);
        /*
         * Do this prior to waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
         */
        if (!IS_ERR(p)) {
                struct completion vfork;

                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
                }

                if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
                        /*
                         * We'll start up with an immediate SIGSTOP.
                         */
                        sigaddset(&p->pending.signal, SIGSTOP);
                        set_tsk_thread_flag(p, TIF_SIGPENDING);
                }

                if (!(clone_flags & CLONE_STOPPED))
                        wake_up_new_task(p, clone_flags);
                else
                        p->state = TASK_STOPPED;

                if (unlikely(trace)) {
                        current->ptrace_message = nr;
                        ptrace_notify((trace << 8) | SIGTRAP);
                }

                if (clone_flags & CLONE_VFORK) {
                        freezer_do_not_count();
                        wait_for_completion(&vfork);
                        freezer_count();
                        if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
                                current->ptrace_message = nr;
                                ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
                        }
                }
        } else {
                free_pid(pid);
                nr = PTR_ERR(p);
        }
        return nr;
}
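
/*
 * Illustrative sketch: architectures call do_fork() from thin syscall
 * wrappers in their own entry code; on i386 of this era, for example,
 * sys_fork() and sys_vfork() are roughly:
 *
 *        return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 *        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
 *                       regs.esp, &regs, 0, NULL, NULL);
 */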

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data, struct kmem_cache *cachep,
                        unsigned long flags)
{
        struct sighand_struct *sighand = data;

        spin_lock_init(&sighand->siglock);
        INIT_LIST_HEAD(&sighand->signalfd_list);
}

void __init proc_caches_init(void)
{
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
                        sighand_ctor, NULL);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        vm_area_cachep = kmem_cache_create("vm_area_struct",
                        sizeof(struct vm_area_struct), 0,
                        SLAB_PANIC, NULL, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
        /*
         * If unsharing a thread from a thread group, must also
         * unshare vm.
         */
        if (*flags_ptr & CLONE_THREAD)
                *flags_ptr |= CLONE_VM;

        /*
         * If unsharing vm, must also unshare signal handlers.
         */
        if (*flags_ptr & CLONE_VM)
                *flags_ptr |= CLONE_SIGHAND;

        /*
         * If unsharing signal handlers and the task was created
         * using CLONE_THREAD, then it must also unshare the thread group.
         */
        if ((*flags_ptr & CLONE_SIGHAND) &&
            (atomic_read(&current->signal->count) > 1))
                *flags_ptr |= CLONE_THREAD;

        /*
         * If unsharing namespace, must also unshare filesystem information.
         */
        if (*flags_ptr & CLONE_NEWNS)
                *flags_ptr |= CLONE_FS;
}

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
        if (unshare_flags & CLONE_THREAD)
                return -EINVAL;

        return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
        struct fs_struct *fs = current->fs;

        if ((unshare_flags & CLONE_FS) &&
            (fs && atomic_read(&fs->count) > 1)) {
                *new_fsp = __copy_fs_struct(current->fs);
                if (!*new_fsp)
                        return -ENOMEM;
        }

        return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
        struct sighand_struct *sigh = current->sighand;

        if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
                return -EINVAL;
        else
                return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
        struct mm_struct *mm = current->mm;

        if ((unshare_flags & CLONE_VM) &&
            (mm && atomic_read(&mm->mm_users) > 1)) {
                return -EINVAL;
        }

        return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
        struct files_struct *fd = current->files;
        int error = 0;

        if ((unshare_flags & CLONE_FILES) &&
            (fd && atomic_read(&fd->count) > 1)) {
                *new_fdp = dup_fd(fd, &error);
                if (!*new_fdp)
                        return error;
        }

        return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
        if (unshare_flags & CLONE_SYSVSEM)
                return -EINVAL;

        return 0;
}
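
/*
 * Worked example: check_unshare_flags() silently widens a request,
 * so a call such as unshare(CLONE_NEWNS) becomes
 *
 *        unshare_flags = CLONE_NEWNS;
 *        check_unshare_flags(&unshare_flags);
 *        now unshare_flags == (CLONE_NEWNS | CLONE_FS)
 *
 * since a private mount namespace is of little use while the
 * fs_struct (root and cwd) is still shared.
 */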

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
        int err = 0;
        struct fs_struct *fs, *new_fs = NULL;
        struct sighand_struct *new_sigh = NULL;
        struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
        struct files_struct *fd, *new_fd = NULL;
        struct sem_undo_list *new_ulist = NULL;
        struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;

        check_unshare_flags(&unshare_flags);

        /* Return -EINVAL for all unsupported flags */
        err = -EINVAL;
        if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
                                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
                                CLONE_NEWUTS|CLONE_NEWIPC))
                goto bad_unshare_out;

        if ((err = unshare_thread(unshare_flags)))
                goto bad_unshare_out;
        if ((err = unshare_fs(unshare_flags, &new_fs)))
                goto bad_unshare_cleanup_thread;
        if ((err = unshare_sighand(unshare_flags, &new_sigh)))
                goto bad_unshare_cleanup_fs;
        if ((err = unshare_vm(unshare_flags, &new_mm)))
                goto bad_unshare_cleanup_sigh;
        if ((err = unshare_fd(unshare_flags, &new_fd)))
                goto bad_unshare_cleanup_vm;
        if ((err = unshare_semundo(unshare_flags, &new_ulist)))
                goto bad_unshare_cleanup_fd;
        if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
                        new_fs)))
                goto bad_unshare_cleanup_semundo;

        if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {

                task_lock(current);

                if (new_nsproxy) {
                        old_nsproxy = current->nsproxy;
                        current->nsproxy = new_nsproxy;
                        new_nsproxy = old_nsproxy;
                }

                if (new_fs) {
                        fs = current->fs;
                        current->fs = new_fs;
                        new_fs = fs;
                }

                if (new_mm) {
                        mm = current->mm;
                        active_mm = current->active_mm;
                        current->mm = new_mm;
                        current->active_mm = new_mm;
                        activate_mm(active_mm, new_mm);
                        new_mm = mm;
                }

                if (new_fd) {
                        fd = current->files;
                        current->files = new_fd;
                        new_fd = fd;
                }

                task_unlock(current);
        }

        if (new_nsproxy)
                put_nsproxy(new_nsproxy);

bad_unshare_cleanup_semundo:
bad_unshare_cleanup_fd:
        if (new_fd)
                put_files_struct(new_fd);

bad_unshare_cleanup_vm:
        if (new_mm)
                mmput(new_mm);

bad_unshare_cleanup_sigh:
        if (new_sigh)
                if (atomic_dec_and_test(&new_sigh->count))
                        kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
        if (new_fs)
                put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
        return err;
}
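
/*
 * Illustrative sketch (userspace view): the typical consumer of
 * sys_unshare() wants a private mount namespace without forking,
 * roughly:
 *
 *        if (unshare(CLONE_NEWNS) == -1)
 *                perror("unshare");
 *        else
 *                mount(...);       mounts no longer visible to others
 */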