/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task &&
		    atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		__cleanup_signal(sig);
	}
}
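
/*
 * Illustrative locking sketch (not a real call site): release_task()
 * below calls __exit_signal() with the tasklist_lock write-locked, so
 * sighand->siglock nests inside tasklist_lock on this path:
 *
 *	write_lock_irq(&tasklist_lock);
 *	__exit_signal(p);		takes sighand->siglock
 *	write_unlock_irq(&tasklist_lock);
 */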

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) &&
	    leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	proc_flush_task(p);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = 0;

	read_lock(&tasklist_lock);

	p = find_task_by_pid_type(PIDTYPE_PGID, pgrp);
	if (p == NULL)
		p = find_task_by_pid(pgrp);
	if (p != NULL)
		sid = process_session(p);

	read_unlock(&tasklist_lock);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| is_init(p->real_parent))
			continue;
		if (process_group(p->real_parent) != pgrp &&
		    process_session(p->real_parent) == process_session(p)) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}
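
/*
 * Worked example of the orphan test above (illustrative): a shell S
 * in pgrp 100 spawns a pipeline in pgrp 200, same session.  While S
 * lives, members of pgrp 200 have a parent (S) outside their pgrp but
 * inside their session, so the pgrp is not orphaned.  Once S exits,
 * that last outside link is gone and will_become_orphaned_pgrp()
 * returns 1 for pgrp 200.
 */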

int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;
		retval = 1;
		break;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_init - Reparent the calling kernel thread to the init task
 * of the pid space that the thread belongs to.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->parent = child_reaper(current);
	current->real_parent = child_reaper(current);
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (!has_rt_policy(current) && (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (process_session(curr) != session) {
		detach_pid(curr, PIDTYPE_SID);
		set_signal_session(curr->signal, session);
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}

static void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);
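
/*
 * Typical use from a kernel thread (a sketch; the thread name and
 * work loop are made up): daemonize() blocks every signal, so a
 * thread that wants to be killable re-enables just the ones it
 * handles:
 *
 *	daemonize("my-worker");
 *	allow_signal(SIGKILL);
 *	while (!signal_pending(current))
 *		do_work();
 */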

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_task_namespaces(current);
	current->nsproxy = init_task.nsproxy;
	get_task_namespaces(current);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_init();
}

EXPORT_SYMBOL(daemonize);

static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fdset || i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt == &files->fdtab)
			fdt->free_files = files;
		else
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);
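
/*
 * get_files_struct()/put_files_struct() form the usual get/put
 * reference pair.  A sketch of use from another context (hypothetical
 * caller):
 *
 *	struct files_struct *files = get_files_struct(task);
 *	if (files) {
 *		... use files_fdtable(files) ...
 *		put_files_struct(files);
 *	}
 */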

void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}
EXPORT_SYMBOL(reset_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}

static inline void
choose_new_parent(struct task_struct *p, struct task_struct *reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state);
	p->real_parent = reaper;
}

static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
	/* We don't want people slaying init. */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here. */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one. */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child. */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		/* If we'd notified the old parent about this child's death,
		 * also notify the new parent.
		 */
		if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
		    thread_group_empty(p))
			do_notify_parent(p, p->exit_signal);
		else if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((process_group(p) != process_group(father)) &&
	    (process_session(p) == process_session(father))) {
		int pgrp = process_group(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) &&
		    has_stopped_jobs(pgrp)) {
			__kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}
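
/*
 * Note on the two orphaned-pgrp cases of POSIX 3.2.2.2: "Case i"
 * (our own pgrp loses its last outside link when we exit) is handled
 * in exit_notify() below; "Case ii" above covers a child's pgrp
 * losing its last outside link because we, its parent, are dying.
 */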

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static void
forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper(father);
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p, struct task_struct, sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent with a reaper, since the real father is us */
			choose_new_parent(p, reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink(p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * if the ptraced child is a zombie with exit_signal == -1
		 * we must collect it before we exit, or it will remain
		 * zombie forever, since we prevented it from self-reaping
		 * while it was being traced by us, to be able to see it in wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE &&
			     p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p, struct task_struct, ptrace_list);
		choose_new_parent(p, reaper);
		reparent_thread(p, father, 1);
	}
}
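
/*
 * Reaper selection above, illustrated: for a thread group T1-T2-T3
 * where T2 is exiting, next_thread() visits T3, then T1; the first
 * one not already in exit_state adopts T2's children.  Only when
 * every other thread has exited too do the children fall back to
 * child_reaper() -- init of the pid namespace.
 */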

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
				recalc_sigpending_tsk(t);
				if (signal_pending(t))
					signal_wake_up(t, 0);
			}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	if ((process_group(t) != process_group(tsk)) &&
	    (process_session(t) == process_session(tsk)) &&
	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
	    has_stopped_jobs(process_group(tsk))) {
		__kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
		__kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    (tsk->parent_exec_id != t->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;


	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent
				? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}
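
/*
 * Exit-state summary: exit_notify() leaves the task in EXIT_ZOMBIE
 * when someone may still wait for it, or moves it straight to
 * EXIT_DEAD (and release_task()s it here) when exit_signal is -1 and
 * nobody will.  A zombie is later claimed by exactly one waiter via
 * xchg(&p->exit_state, EXIT_DEAD) in wait_task_zombie().
 */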

fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk == child_reaper(tsk))) {
		if (tsk->nsproxy->pid_ns != &init_pid_ns)
			tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
		else
			panic("Attempted to kill init!");
	}


	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	tsk->flags |= PF_EXITING;

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	proc_exit_connector(tsk);
	exit_notify(tsk);
	exit_task_namespaces(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);
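
/*
 * complete_and_exit() below exists mainly for module threads: by the
 * time the completion fires, the thread is executing core-kernel
 * code, so the module that owns the thread function can be safely
 * unloaded.  A sketch (names are hypothetical):
 *
 *	DECLARE_COMPLETION(thread_done);
 *	in the thread:		complete_and_exit(&thread_done, 0);
 *	in module cleanup:	wait_for_completion(&thread_done);
 */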

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code & 0xff) << 8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (delay_group_leader(p))
		return 2;

	if (security_task_wait(p))
		return 0;

	return 1;
}
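
/*
 * eligible_child() encodes the classic wait4() pid conventions:
 * pid > 0 matches exactly that child, pid == 0 matches our own
 * process group, pid < -1 matches process group -pid, and pid == -1
 * matches any child.  E.g. waitpid(-500, &status, 0) reaps only
 * children in pgrp 500.
 */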

static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}
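
/*
 * wait_noreap_copyout() services the WNOWAIT case: the child's status
 * is reported through siginfo/rusage, but no state transition is made
 * here, so the child stays where it is and a later wait*() call can
 * collect it again.
 */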

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;
	int status;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;
	}

	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (retval) {
		// TODO: is this safe?
		p->exit_state = EXIT_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held.  */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			// TODO: is this safe?
			p->exit_state = EXIT_ZOMBIE;
			/*
			 * If this is not a detached task, notify the parent.
			 * If it's still not detached after that, don't release
			 * it now.
			 */
			if (p->exit_signal != -1) {
				do_notify_parent(p, p->exit_signal);
				if (p->exit_signal != -1)
					p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}
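
/*
 * Status encoding used above, for reference: the low 7 bits of
 * exit_code carry the terminating signal (0 for a normal exit), bit
 * 0x80 flags a core dump, and bits 8..15 carry the exit status.
 * That is the same layout user space decodes with WIFEXITED(),
 * WEXITSTATUS(), WTERMSIG() and WCOREDUMP().
 */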

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
			     int noreap, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state & TASK_TRACED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock.  We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning.  Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}
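
/*
 * A stopped child is reported as (exit_code << 8) | 0x7f above; the
 * 0x7f low byte is precisely what makes user space's WIFSTOPPED()
 * true, with the stop signal recoverable via WSTOPSIG().
 */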

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (unlikely(!p->signal))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}


static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach.  It is waiting for the tasklist_lock,
	 * under which we have to switch the parent links, but it has
	 * already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}
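
/*
 * do_wait() below is the common back end of sys_wait4() and
 * sys_waitid().  The overall shape: queue ourselves on
 * signal->wait_chldexit, scan every thread's children for one that
 * eligible_child() accepts, and either reap it, bail out for WNOHANG,
 * or schedule() and rescan once a child's state change wakes us.
 */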

static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p, &tsk->children) {
			p = list_entry(_p, struct task_struct, sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			switch (p->state) {
			case TASK_TRACED:
				/*
				 * When we hit the race with PTRACE_ATTACH,
				 * we will not report this child.  But the
				 * race means it has not yet been moved to
				 * our ptrace_children list, so we need to
				 * set the flag here to avoid a spurious ECHILD
				 * when the race happens with the only child.
				 */
				flag = 1;
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock.  */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
					       ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}
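
/*
 * User-space view (illustrative): libc wrappers funnel into the
 * entry points above, e.g.
 *
 *	pid_t pid = waitpid(-1, &status, WNOHANG | WUNTRACED);
 *
 * typically arrives as sys_wait4(-1, &status, WNOHANG|WUNTRACED, NULL),
 * and thus reaches do_wait() with WEXITED or'd in.
 */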

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility.
 * waitpid() should be implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif