/*
 * linux/kernel/fork.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * 'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

void free_task(struct task_struct *tsk)
{
	free_thread_info(tsk->thread_info);
	rt_mutex_debug_task_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
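	 *
	 * Illustrative arithmetic (an assumption about the configuration,
	 * not something the code below relies on): with 4K pages and a
	 * two-page THREAD_SIZE, this works out to max_threads = mempages / 16.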
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	*tsk = *orig;
	tsk->thread_info = ti;
	setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	return tsk;
}

#ifdef CONFIG_MMU
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
					-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct * mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
	pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct * mm_init(struct mm_struct * mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_waiters = 0;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	rwlock_init(&mm->ioctx_list_lock);
	mm->ioctx_list = NULL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		return mm;
	}
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm. Checks PF_BORROWED_MM (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count. User must release the mm via mmput()
 * after use. Typically used by /proc and ptrace.
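 *
 * A typical caller pattern (an illustrative sketch only, not code taken
 * from this file):
 *
 *	mm = get_task_mm(task);
 *	if (mm) {
 *		... inspect the mm ...
 *		mmput(mm);
 *	}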
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_BORROWED_MM)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested. We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble otherwise. Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid
	    && !(tsk->flags & PF_SIGNALED)
	    && atomic_read(&mm->mm_users) > 1) {
		u32 __user * tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
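	 *
	 * (A kernel thread has no mm of its own: current->mm is NULL here,
	 *  so the child is left without an mm and will borrow an active_mm
	 *  lazily at context-switch time.)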
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i+1) * 8 * sizeof(long);
	return i;
}

static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	fdt = &newf->fdtab;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	fdt->open_fds = (fd_set *)&newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 * Note: we're not a clone task, so the open count won't change.
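	 * (open_files is the highest open fd rounded up to a whole bitmap
	 *  word, as computed by count_open_files() above.)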
	 */
	if (open_files > new_fdt->max_fds) {
		new_fdt->max_fds = 0;
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		*errorp = expand_files(newf, open_files-1);
		spin_unlock(&newf->file_lock);
		if (*errorp < 0)
			goto out_release;
		new_fdt = files_fdtable(newf);
		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have a new, bigger fd table by now, and we need
		 * the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits,
		old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits,
		old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */

int unshare_files(void)
{
	struct files_struct *files = current->files;
	int rc;

	BUG_ON(!files);

	/* This can race but the race causes us to copy when we don't
	   need to and drop the copy */
	if (atomic_read(&files->count) == 1)
	{
		atomic_inc(&files->count);
		return 0;
	}
	rc = copy_files(0, current);
	if (rc)
		current->files = files;
	return rc;
}

EXPORT_SYMBOL(unshare_files);

static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;
	sig->tsk = tsk;

	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = NULL;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->sched_time = 0;
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		/*
		 * New sole thread in the process gets an expiry time
		 * of the whole CPU time limit.
		 */
		tsk->it_prof_expires =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
	}
	acct_init_pacct(&sig->pacct);

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	exit_thread_group_keys(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	atomic_dec(&sig->live);

	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return current->pid;
}

static inline void rt_mutex_init_task(struct task_struct *p)
{
#ifdef CONFIG_RT_MUTEXES
	spin_lock_init(&p->pi_lock);
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *parent_tidptr,
					int __user *child_tidptr,
					int pid)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	rt_mutex_init_task(p);

#ifdef CONFIG_TRACE_IRQFLAGS
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	p->pid = pid;
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup_delays_binfmt;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->sched_time = 0;
#ifdef CONFIG_TASK_XACCT
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
#endif
	task_io_accounting_init(p);
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
	cpuset_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_copy(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cpuset;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespaces;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

	/* Our parent execution domain becomes current domain.
	   These must match for thread signalling to apply */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
	p->ioprio = current->ioprio;

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
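	 * (Returning -ERESTARTNOINTR below makes the fork be retried
	 *  transparently once the pending signal has been handled.)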
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_cleanup_namespaces;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	if (likely(p->pid)) {
		add_parent(p);
		if (unlikely(p->ptrace & PT_PTRACED))
			__ptrace_link(p, current->parent);

		if (thread_group_leader(p)) {
			p->signal->tty = current->signal->tty;
			p->signal->pgrp = process_group(current);
			set_signal_session(p->signal, process_session(current));
			attach_pid(p, PIDTYPE_PGID, process_group(p));
			attach_pid(p, PIDTYPE_SID, process_session(p));

			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, p->pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	return p;

bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
	cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
	if (!IS_ERR(task))
		init_idle(task, cpu);

	return task;
}

static inline int fork_traceflag(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;
	else if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	     unsigned long stack_start,
	     struct pt_regs *regs,
	     unsigned long stack_size,
	     int __user *parent_tidptr,
	     int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	struct pid *pid = alloc_pid();
	long nr;

	if (!pid)
		return -EAGAIN;
	nr = pid->nr;
	if (unlikely(current->ptrace)) {
		trace = fork_traceflag(clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED))
			wake_up_new_task(p, clone_flags);
		else
			p->state = TASK_STOPPED;

		if (unlikely(trace)) {
			current->ptrace_message = nr;
			ptrace_notify((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
				current->ptrace_message = nr;
				ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
			}
		}
	} else {
		free_pid(pid);
		nr = PTR_ERR(p);
	}
	return nr;
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags)
{
	struct sighand_struct *sighand = data;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
					SLAB_CTOR_CONSTRUCTOR)
		spin_lock_init(&sighand->siglock);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor, NULL);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}


/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then must unshare the thread
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if ((unshare_flags & CLONE_FS) &&
	    (fs && atomic_read(&fs->count) > 1)) {
		*new_fsp = __copy_fs_struct(current->fs);
		if (!*new_fsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unshare the mnt_namespace structure if it is being shared
 */
static int unshare_mnt_namespace(unsigned long unshare_flags,
		struct mnt_namespace **new_nsp, struct fs_struct *new_fs)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;

	if ((unshare_flags & CLONE_NEWNS) && ns) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		*new_nsp = dup_mnt_ns(current, new_fs ? new_fs : current->fs);
		if (!*new_nsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
		return -EINVAL;
	else
		return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
	if (unshare_flags & CLONE_SYSVSEM)
		return -EINVAL;

	return 0;
}

#ifndef CONFIG_IPC_NS
static inline int unshare_ipcs(unsigned long flags, struct ipc_namespace **ns)
{
	if (flags & CLONE_NEWIPC)
		return -EINVAL;

	return 0;
}
#endif

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone. copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
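 *
 * An illustrative userspace example (assuming the unshare(2) wrapper):
 * a call such as unshare(CLONE_NEWNS) gives the caller a private mount
 * namespace; check_unshare_flags() forces CLONE_FS on in that case.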
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct mnt_namespace *ns, *new_ns = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct sem_undo_list *new_ulist = NULL;
	struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;
	struct uts_namespace *uts, *new_uts = NULL;
	struct ipc_namespace *ipc, *new_ipc = NULL;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC))
		goto bad_unshare_out;

	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_mnt_namespace(unshare_flags, &new_ns, new_fs)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_ns;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
		goto bad_unshare_cleanup_fd;
	if ((err = unshare_utsname(unshare_flags, &new_uts)))
		goto bad_unshare_cleanup_semundo;
	if ((err = unshare_ipcs(unshare_flags, &new_ipc)))
		goto bad_unshare_cleanup_uts;

	if (new_ns || new_uts || new_ipc) {
		old_nsproxy = current->nsproxy;
		new_nsproxy = dup_namespaces(old_nsproxy);
		if (!new_nsproxy) {
			err = -ENOMEM;
			goto bad_unshare_cleanup_ipc;
		}
	}

	if (new_fs || new_ns || new_mm || new_fd || new_ulist ||
				new_uts || new_ipc) {

		task_lock(current);

		if (new_nsproxy) {
			current->nsproxy = new_nsproxy;
			new_nsproxy = old_nsproxy;
		}

		if (new_fs) {
			fs = current->fs;
			current->fs = new_fs;
			new_fs = fs;
		}

		if (new_ns) {
			ns = current->nsproxy->mnt_ns;
			current->nsproxy->mnt_ns = new_ns;
			new_ns = ns;
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		if (new_uts) {
			uts = current->nsproxy->uts_ns;
			current->nsproxy->uts_ns = new_uts;
			new_uts = uts;
		}

		if (new_ipc) {
			ipc = current->nsproxy->ipc_ns;
			current->nsproxy->ipc_ns = new_ipc;
			new_ipc = ipc;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_ipc:
	if (new_ipc)
		put_ipc_ns(new_ipc);

bad_unshare_cleanup_uts:
	if (new_uts)
		put_uts_ns(new_uts);

bad_unshare_cleanup_semundo:
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_ns:
	if (new_ns)
		put_mnt_ns(new_ns);

bad_unshare_cleanup_fs:
	if (new_fs)
		put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}