1 /* 2 * linux/kernel/exit.c 3 * 4 * Copyright (C) 1991, 1992 Linus Torvalds 5 */ 6 7 #include <linux/mm.h> 8 #include <linux/slab.h> 9 #include <linux/interrupt.h> 10 #include <linux/module.h> 11 #include <linux/capability.h> 12 #include <linux/completion.h> 13 #include <linux/personality.h> 14 #include <linux/tty.h> 15 #include <linux/iocontext.h> 16 #include <linux/key.h> 17 #include <linux/security.h> 18 #include <linux/cpu.h> 19 #include <linux/acct.h> 20 #include <linux/tsacct_kern.h> 21 #include <linux/file.h> 22 #include <linux/fdtable.h> 23 #include <linux/binfmts.h> 24 #include <linux/nsproxy.h> 25 #include <linux/pid_namespace.h> 26 #include <linux/ptrace.h> 27 #include <linux/profile.h> 28 #include <linux/mount.h> 29 #include <linux/proc_fs.h> 30 #include <linux/kthread.h> 31 #include <linux/mempolicy.h> 32 #include <linux/taskstats_kern.h> 33 #include <linux/delayacct.h> 34 #include <linux/freezer.h> 35 #include <linux/cgroup.h> 36 #include <linux/syscalls.h> 37 #include <linux/signal.h> 38 #include <linux/posix-timers.h> 39 #include <linux/cn_proc.h> 40 #include <linux/mutex.h> 41 #include <linux/futex.h> 42 #include <linux/pipe_fs_i.h> 43 #include <linux/audit.h> /* for audit_free() */ 44 #include <linux/resource.h> 45 #include <linux/blkdev.h> 46 #include <linux/task_io_accounting_ops.h> 47 #include <linux/tracehook.h> 48 #include <linux/fs_struct.h> 49 #include <linux/init_task.h> 50 #include <linux/perf_event.h> 51 #include <trace/events/sched.h> 52 #include <linux/hw_breakpoint.h> 53 #include <linux/oom.h> 54 55 #include <asm/uaccess.h> 56 #include <asm/unistd.h> 57 #include <asm/pgtable.h> 58 #include <asm/mmu_context.h> 59 60 static void exit_mm(struct task_struct * tsk); 61 62 static void __unhash_process(struct task_struct *p, bool group_dead) 63 { 64 nr_threads--; 65 detach_pid(p, PIDTYPE_PID); 66 if (group_dead) { 67 detach_pid(p, PIDTYPE_PGID); 68 detach_pid(p, PIDTYPE_SID); 69 70 list_del_rcu(&p->tasks); 71 list_del_init(&p->sibling); 72 __this_cpu_dec(process_counts); 73 } 74 list_del_rcu(&p->thread_group); 75 } 76 77 /* 78 * This function expects the tasklist_lock write-locked. 79 */ 80 static void __exit_signal(struct task_struct *tsk) 81 { 82 struct signal_struct *sig = tsk->signal; 83 bool group_dead = thread_group_leader(tsk); 84 struct sighand_struct *sighand; 85 struct tty_struct *uninitialized_var(tty); 86 87 sighand = rcu_dereference_check(tsk->sighand, 88 rcu_read_lock_held() || 89 lockdep_tasklist_lock_is_held()); 90 spin_lock(&sighand->siglock); 91 92 posix_cpu_timers_exit(tsk); 93 if (group_dead) { 94 posix_cpu_timers_exit_group(tsk); 95 tty = sig->tty; 96 sig->tty = NULL; 97 } else { 98 /* 99 * This can only happen if the caller is de_thread(). 100 * FIXME: this is the temporary hack, we should teach 101 * posix-cpu-timers to handle this case correctly. 102 */ 103 if (unlikely(has_group_leader_pid(tsk))) 104 posix_cpu_timers_exit_group(tsk); 105 106 /* 107 * If there is any task waiting for the group exit 108 * then notify it: 109 */ 110 if (sig->notify_count > 0 && !--sig->notify_count) 111 wake_up_process(sig->group_exit_task); 112 113 if (tsk == sig->curr_target) 114 sig->curr_target = next_thread(tsk); 115 /* 116 * Accumulate here the counters for all threads but the 117 * group leader as they die, so they can be added into 118 * the process-wide totals when those are taken. 119 * The group leader stays around as a zombie as long 120 * as there are other threads. When it gets reaped, 121 * the exit.c code will add its counts into these totals. 122 * We won't ever get here for the group leader, since it 123 * will have been the last reference on the signal_struct. 124 */ 125 sig->utime = cputime_add(sig->utime, tsk->utime); 126 sig->stime = cputime_add(sig->stime, tsk->stime); 127 sig->gtime = cputime_add(sig->gtime, tsk->gtime); 128 sig->min_flt += tsk->min_flt; 129 sig->maj_flt += tsk->maj_flt; 130 sig->nvcsw += tsk->nvcsw; 131 sig->nivcsw += tsk->nivcsw; 132 sig->inblock += task_io_get_inblock(tsk); 133 sig->oublock += task_io_get_oublock(tsk); 134 task_io_accounting_add(&sig->ioac, &tsk->ioac); 135 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; 136 } 137 138 sig->nr_threads--; 139 __unhash_process(tsk, group_dead); 140 141 /* 142 * Do this under ->siglock, we can race with another thread 143 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 144 */ 145 flush_sigqueue(&tsk->pending); 146 tsk->sighand = NULL; 147 spin_unlock(&sighand->siglock); 148 149 __cleanup_sighand(sighand); 150 clear_tsk_thread_flag(tsk,TIF_SIGPENDING); 151 if (group_dead) { 152 flush_sigqueue(&sig->shared_pending); 153 tty_kref_put(tty); 154 } 155 } 156 157 static void delayed_put_task_struct(struct rcu_head *rhp) 158 { 159 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 160 161 perf_event_delayed_put(tsk); 162 trace_sched_process_free(tsk); 163 put_task_struct(tsk); 164 } 165 166 167 void release_task(struct task_struct * p) 168 { 169 struct task_struct *leader; 170 int zap_leader; 171 repeat: 172 /* don't need to get the RCU readlock here - the process is dead and 173 * can't be modifying its own credentials. But shut RCU-lockdep up */ 174 rcu_read_lock(); 175 atomic_dec(&__task_cred(p)->user->processes); 176 rcu_read_unlock(); 177 178 proc_flush_task(p); 179 180 write_lock_irq(&tasklist_lock); 181 ptrace_release_task(p); 182 __exit_signal(p); 183 184 /* 185 * If we are the last non-leader member of the thread 186 * group, and the leader is zombie, then notify the 187 * group leader's parent process. (if it wants notification.) 188 */ 189 zap_leader = 0; 190 leader = p->group_leader; 191 if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) { 192 /* 193 * If we were the last child thread and the leader has 194 * exited already, and the leader's parent ignores SIGCHLD, 195 * then we are the one who should release the leader. 196 */ 197 zap_leader = do_notify_parent(leader, leader->exit_signal); 198 if (zap_leader) 199 leader->exit_state = EXIT_DEAD; 200 } 201 202 write_unlock_irq(&tasklist_lock); 203 release_thread(p); 204 call_rcu(&p->rcu, delayed_put_task_struct); 205 206 p = leader; 207 if (unlikely(zap_leader)) 208 goto repeat; 209 } 210 211 /* 212 * This checks not only the pgrp, but falls back on the pid if no 213 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly 214 * without this... 215 * 216 * The caller must hold rcu lock or the tasklist lock. 217 */ 218 struct pid *session_of_pgrp(struct pid *pgrp) 219 { 220 struct task_struct *p; 221 struct pid *sid = NULL; 222 223 p = pid_task(pgrp, PIDTYPE_PGID); 224 if (p == NULL) 225 p = pid_task(pgrp, PIDTYPE_PID); 226 if (p != NULL) 227 sid = task_session(p); 228 229 return sid; 230 } 231 232 /* 233 * Determine if a process group is "orphaned", according to the POSIX 234 * definition in 2.2.2.52. Orphaned process groups are not to be affected 235 * by terminal-generated stop signals. Newly orphaned process groups are 236 * to receive a SIGHUP and a SIGCONT. 237 * 238 * "I ask you, have you ever known what it is to be an orphan?" 239 */ 240 static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task) 241 { 242 struct task_struct *p; 243 244 do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 245 if ((p == ignored_task) || 246 (p->exit_state && thread_group_empty(p)) || 247 is_global_init(p->real_parent)) 248 continue; 249 250 if (task_pgrp(p->real_parent) != pgrp && 251 task_session(p->real_parent) == task_session(p)) 252 return 0; 253 } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 254 255 return 1; 256 } 257 258 int is_current_pgrp_orphaned(void) 259 { 260 int retval; 261 262 read_lock(&tasklist_lock); 263 retval = will_become_orphaned_pgrp(task_pgrp(current), NULL); 264 read_unlock(&tasklist_lock); 265 266 return retval; 267 } 268 269 static bool has_stopped_jobs(struct pid *pgrp) 270 { 271 struct task_struct *p; 272 273 do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 274 if (p->signal->flags & SIGNAL_STOP_STOPPED) 275 return true; 276 } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 277 278 return false; 279 } 280 281 /* 282 * Check to see if any process groups have become orphaned as 283 * a result of our exiting, and if they have any stopped jobs, 284 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) 285 */ 286 static void 287 kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) 288 { 289 struct pid *pgrp = task_pgrp(tsk); 290 struct task_struct *ignored_task = tsk; 291 292 if (!parent) 293 /* exit: our father is in a different pgrp than 294 * we are and we were the only connection outside. 295 */ 296 parent = tsk->real_parent; 297 else 298 /* reparent: our child is in a different pgrp than 299 * we are, and it was the only connection outside. 300 */ 301 ignored_task = NULL; 302 303 if (task_pgrp(parent) != pgrp && 304 task_session(parent) == task_session(tsk) && 305 will_become_orphaned_pgrp(pgrp, ignored_task) && 306 has_stopped_jobs(pgrp)) { 307 __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp); 308 __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); 309 } 310 } 311 312 /** 313 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd 314 * 315 * If a kernel thread is launched as a result of a system call, or if 316 * it ever exits, it should generally reparent itself to kthreadd so it 317 * isn't in the way of other processes and is correctly cleaned up on exit. 318 * 319 * The various task state such as scheduling policy and priority may have 320 * been inherited from a user process, so we reset them to sane values here. 321 * 322 * NOTE that reparent_to_kthreadd() gives the caller full capabilities. 323 */ 324 static void reparent_to_kthreadd(void) 325 { 326 write_lock_irq(&tasklist_lock); 327 328 ptrace_unlink(current); 329 /* Reparent to init */ 330 current->real_parent = current->parent = kthreadd_task; 331 list_move_tail(¤t->sibling, ¤t->real_parent->children); 332 333 /* Set the exit signal to SIGCHLD so we signal init on exit */ 334 current->exit_signal = SIGCHLD; 335 336 if (task_nice(current) < 0) 337 set_user_nice(current, 0); 338 /* cpus_allowed? */ 339 /* rt_priority? */ 340 /* signals? */ 341 memcpy(current->signal->rlim, init_task.signal->rlim, 342 sizeof(current->signal->rlim)); 343 344 atomic_inc(&init_cred.usage); 345 commit_creds(&init_cred); 346 write_unlock_irq(&tasklist_lock); 347 } 348 349 void __set_special_pids(struct pid *pid) 350 { 351 struct task_struct *curr = current->group_leader; 352 353 if (task_session(curr) != pid) 354 change_pid(curr, PIDTYPE_SID, pid); 355 356 if (task_pgrp(curr) != pid) 357 change_pid(curr, PIDTYPE_PGID, pid); 358 } 359 360 static void set_special_pids(struct pid *pid) 361 { 362 write_lock_irq(&tasklist_lock); 363 __set_special_pids(pid); 364 write_unlock_irq(&tasklist_lock); 365 } 366 367 /* 368 * Let kernel threads use this to say that they allow a certain signal. 369 * Must not be used if kthread was cloned with CLONE_SIGHAND. 370 */ 371 int allow_signal(int sig) 372 { 373 if (!valid_signal(sig) || sig < 1) 374 return -EINVAL; 375 376 spin_lock_irq(¤t->sighand->siglock); 377 /* This is only needed for daemonize()'ed kthreads */ 378 sigdelset(¤t->blocked, sig); 379 /* 380 * Kernel threads handle their own signals. Let the signal code 381 * know it'll be handled, so that they don't get converted to 382 * SIGKILL or just silently dropped. 383 */ 384 current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; 385 recalc_sigpending(); 386 spin_unlock_irq(¤t->sighand->siglock); 387 return 0; 388 } 389 390 EXPORT_SYMBOL(allow_signal); 391 392 int disallow_signal(int sig) 393 { 394 if (!valid_signal(sig) || sig < 1) 395 return -EINVAL; 396 397 spin_lock_irq(¤t->sighand->siglock); 398 current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN; 399 recalc_sigpending(); 400 spin_unlock_irq(¤t->sighand->siglock); 401 return 0; 402 } 403 404 EXPORT_SYMBOL(disallow_signal); 405 406 /* 407 * Put all the gunge required to become a kernel thread without 408 * attached user resources in one place where it belongs. 409 */ 410 411 void daemonize(const char *name, ...) 412 { 413 va_list args; 414 sigset_t blocked; 415 416 va_start(args, name); 417 vsnprintf(current->comm, sizeof(current->comm), name, args); 418 va_end(args); 419 420 /* 421 * If we were started as result of loading a module, close all of the 422 * user space pages. We don't need them, and if we didn't close them 423 * they would be locked into memory. 424 */ 425 exit_mm(current); 426 /* 427 * We don't want to have TIF_FREEZE set if the system-wide hibernation 428 * or suspend transition begins right now. 429 */ 430 current->flags |= (PF_NOFREEZE | PF_KTHREAD); 431 432 if (current->nsproxy != &init_nsproxy) { 433 get_nsproxy(&init_nsproxy); 434 switch_task_namespaces(current, &init_nsproxy); 435 } 436 set_special_pids(&init_struct_pid); 437 proc_clear_tty(current); 438 439 /* Block and flush all signals */ 440 sigfillset(&blocked); 441 sigprocmask(SIG_BLOCK, &blocked, NULL); 442 flush_signals(current); 443 444 /* Become as one with the init task */ 445 446 daemonize_fs_struct(); 447 exit_files(current); 448 current->files = init_task.files; 449 atomic_inc(¤t->files->count); 450 451 reparent_to_kthreadd(); 452 } 453 454 EXPORT_SYMBOL(daemonize); 455 456 static void close_files(struct files_struct * files) 457 { 458 int i, j; 459 struct fdtable *fdt; 460 461 j = 0; 462 463 /* 464 * It is safe to dereference the fd table without RCU or 465 * ->file_lock because this is the last reference to the 466 * files structure. But use RCU to shut RCU-lockdep up. 467 */ 468 rcu_read_lock(); 469 fdt = files_fdtable(files); 470 rcu_read_unlock(); 471 for (;;) { 472 unsigned long set; 473 i = j * __NFDBITS; 474 if (i >= fdt->max_fds) 475 break; 476 set = fdt->open_fds->fds_bits[j++]; 477 while (set) { 478 if (set & 1) { 479 struct file * file = xchg(&fdt->fd[i], NULL); 480 if (file) { 481 filp_close(file, files); 482 cond_resched(); 483 } 484 } 485 i++; 486 set >>= 1; 487 } 488 } 489 } 490 491 struct files_struct *get_files_struct(struct task_struct *task) 492 { 493 struct files_struct *files; 494 495 task_lock(task); 496 files = task->files; 497 if (files) 498 atomic_inc(&files->count); 499 task_unlock(task); 500 501 return files; 502 } 503 504 void put_files_struct(struct files_struct *files) 505 { 506 struct fdtable *fdt; 507 508 if (atomic_dec_and_test(&files->count)) { 509 close_files(files); 510 /* 511 * Free the fd and fdset arrays if we expanded them. 512 * If the fdtable was embedded, pass files for freeing 513 * at the end of the RCU grace period. Otherwise, 514 * you can free files immediately. 515 */ 516 rcu_read_lock(); 517 fdt = files_fdtable(files); 518 if (fdt != &files->fdtab) 519 kmem_cache_free(files_cachep, files); 520 free_fdtable(fdt); 521 rcu_read_unlock(); 522 } 523 } 524 525 void reset_files_struct(struct files_struct *files) 526 { 527 struct task_struct *tsk = current; 528 struct files_struct *old; 529 530 old = tsk->files; 531 task_lock(tsk); 532 tsk->files = files; 533 task_unlock(tsk); 534 put_files_struct(old); 535 } 536 537 void exit_files(struct task_struct *tsk) 538 { 539 struct files_struct * files = tsk->files; 540 541 if (files) { 542 task_lock(tsk); 543 tsk->files = NULL; 544 task_unlock(tsk); 545 put_files_struct(files); 546 } 547 } 548 549 #ifdef CONFIG_MM_OWNER 550 /* 551 * A task is exiting. If it owned this mm, find a new owner for the mm. 552 */ 553 void mm_update_next_owner(struct mm_struct *mm) 554 { 555 struct task_struct *c, *g, *p = current; 556 557 retry: 558 /* 559 * If the exiting or execing task is not the owner, it's 560 * someone else's problem. 561 */ 562 if (mm->owner != p) 563 return; 564 /* 565 * The current owner is exiting/execing and there are no other 566 * candidates. Do not leave the mm pointing to a possibly 567 * freed task structure. 568 */ 569 if (atomic_read(&mm->mm_users) <= 1) { 570 mm->owner = NULL; 571 return; 572 } 573 574 read_lock(&tasklist_lock); 575 /* 576 * Search in the children 577 */ 578 list_for_each_entry(c, &p->children, sibling) { 579 if (c->mm == mm) 580 goto assign_new_owner; 581 } 582 583 /* 584 * Search in the siblings 585 */ 586 list_for_each_entry(c, &p->real_parent->children, sibling) { 587 if (c->mm == mm) 588 goto assign_new_owner; 589 } 590 591 /* 592 * Search through everything else. We should not get 593 * here often 594 */ 595 do_each_thread(g, c) { 596 if (c->mm == mm) 597 goto assign_new_owner; 598 } while_each_thread(g, c); 599 600 read_unlock(&tasklist_lock); 601 /* 602 * We found no owner yet mm_users > 1: this implies that we are 603 * most likely racing with swapoff (try_to_unuse()) or /proc or 604 * ptrace or page migration (get_task_mm()). Mark owner as NULL. 605 */ 606 mm->owner = NULL; 607 return; 608 609 assign_new_owner: 610 BUG_ON(c == p); 611 get_task_struct(c); 612 /* 613 * The task_lock protects c->mm from changing. 614 * We always want mm->owner->mm == mm 615 */ 616 task_lock(c); 617 /* 618 * Delay read_unlock() till we have the task_lock() 619 * to ensure that c does not slip away underneath us 620 */ 621 read_unlock(&tasklist_lock); 622 if (c->mm != mm) { 623 task_unlock(c); 624 put_task_struct(c); 625 goto retry; 626 } 627 mm->owner = c; 628 task_unlock(c); 629 put_task_struct(c); 630 } 631 #endif /* CONFIG_MM_OWNER */ 632 633 /* 634 * Turn us into a lazy TLB process if we 635 * aren't already.. 636 */ 637 static void exit_mm(struct task_struct * tsk) 638 { 639 struct mm_struct *mm = tsk->mm; 640 struct core_state *core_state; 641 642 mm_release(tsk, mm); 643 if (!mm) 644 return; 645 /* 646 * Serialize with any possible pending coredump. 647 * We must hold mmap_sem around checking core_state 648 * and clearing tsk->mm. The core-inducing thread 649 * will increment ->nr_threads for each thread in the 650 * group with ->mm != NULL. 651 */ 652 down_read(&mm->mmap_sem); 653 core_state = mm->core_state; 654 if (core_state) { 655 struct core_thread self; 656 up_read(&mm->mmap_sem); 657 658 self.task = tsk; 659 self.next = xchg(&core_state->dumper.next, &self); 660 /* 661 * Implies mb(), the result of xchg() must be visible 662 * to core_state->dumper. 663 */ 664 if (atomic_dec_and_test(&core_state->nr_threads)) 665 complete(&core_state->startup); 666 667 for (;;) { 668 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 669 if (!self.task) /* see coredump_finish() */ 670 break; 671 schedule(); 672 } 673 __set_task_state(tsk, TASK_RUNNING); 674 down_read(&mm->mmap_sem); 675 } 676 atomic_inc(&mm->mm_count); 677 BUG_ON(mm != tsk->active_mm); 678 /* more a memory barrier than a real lock */ 679 task_lock(tsk); 680 tsk->mm = NULL; 681 up_read(&mm->mmap_sem); 682 enter_lazy_tlb(mm, current); 683 /* We don't want this task to be frozen prematurely */ 684 clear_freeze_flag(tsk); 685 if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) 686 atomic_dec(&mm->oom_disable_count); 687 task_unlock(tsk); 688 mm_update_next_owner(mm); 689 mmput(mm); 690 } 691 692 /* 693 * When we die, we re-parent all our children. 694 * Try to give them to another thread in our thread 695 * group, and if no such member exists, give it to 696 * the child reaper process (ie "init") in our pid 697 * space. 698 */ 699 static struct task_struct *find_new_reaper(struct task_struct *father) 700 __releases(&tasklist_lock) 701 __acquires(&tasklist_lock) 702 { 703 struct pid_namespace *pid_ns = task_active_pid_ns(father); 704 struct task_struct *thread; 705 706 thread = father; 707 while_each_thread(father, thread) { 708 if (thread->flags & PF_EXITING) 709 continue; 710 if (unlikely(pid_ns->child_reaper == father)) 711 pid_ns->child_reaper = thread; 712 return thread; 713 } 714 715 if (unlikely(pid_ns->child_reaper == father)) { 716 write_unlock_irq(&tasklist_lock); 717 if (unlikely(pid_ns == &init_pid_ns)) 718 panic("Attempted to kill init!"); 719 720 zap_pid_ns_processes(pid_ns); 721 write_lock_irq(&tasklist_lock); 722 /* 723 * We can not clear ->child_reaper or leave it alone. 724 * There may by stealth EXIT_DEAD tasks on ->children, 725 * forget_original_parent() must move them somewhere. 726 */ 727 pid_ns->child_reaper = init_pid_ns.child_reaper; 728 } 729 730 return pid_ns->child_reaper; 731 } 732 733 /* 734 * Any that need to be release_task'd are put on the @dead list. 735 */ 736 static void reparent_leader(struct task_struct *father, struct task_struct *p, 737 struct list_head *dead) 738 { 739 list_move_tail(&p->sibling, &p->real_parent->children); 740 741 if (p->exit_state == EXIT_DEAD) 742 return; 743 /* 744 * If this is a threaded reparent there is no need to 745 * notify anyone anything has happened. 746 */ 747 if (same_thread_group(p->real_parent, father)) 748 return; 749 750 /* We don't want people slaying init. */ 751 p->exit_signal = SIGCHLD; 752 753 /* If it has exited notify the new parent about this child's death. */ 754 if (!p->ptrace && 755 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { 756 if (do_notify_parent(p, p->exit_signal)) { 757 p->exit_state = EXIT_DEAD; 758 list_move_tail(&p->sibling, dead); 759 } 760 } 761 762 kill_orphaned_pgrp(p, father); 763 } 764 765 static void forget_original_parent(struct task_struct *father) 766 { 767 struct task_struct *p, *n, *reaper; 768 LIST_HEAD(dead_children); 769 770 write_lock_irq(&tasklist_lock); 771 /* 772 * Note that exit_ptrace() and find_new_reaper() might 773 * drop tasklist_lock and reacquire it. 774 */ 775 exit_ptrace(father); 776 reaper = find_new_reaper(father); 777 778 list_for_each_entry_safe(p, n, &father->children, sibling) { 779 struct task_struct *t = p; 780 do { 781 t->real_parent = reaper; 782 if (t->parent == father) { 783 BUG_ON(t->ptrace); 784 t->parent = t->real_parent; 785 } 786 if (t->pdeath_signal) 787 group_send_sig_info(t->pdeath_signal, 788 SEND_SIG_NOINFO, t); 789 } while_each_thread(p, t); 790 reparent_leader(father, p, &dead_children); 791 } 792 write_unlock_irq(&tasklist_lock); 793 794 BUG_ON(!list_empty(&father->children)); 795 796 list_for_each_entry_safe(p, n, &dead_children, sibling) { 797 list_del_init(&p->sibling); 798 release_task(p); 799 } 800 } 801 802 /* 803 * Send signals to all our closest relatives so that they know 804 * to properly mourn us.. 805 */ 806 static void exit_notify(struct task_struct *tsk, int group_dead) 807 { 808 bool autoreap; 809 810 /* 811 * This does two things: 812 * 813 * A. Make init inherit all the child processes 814 * B. Check to see if any process groups have become orphaned 815 * as a result of our exiting, and if they have any stopped 816 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) 817 */ 818 forget_original_parent(tsk); 819 exit_task_namespaces(tsk); 820 821 write_lock_irq(&tasklist_lock); 822 if (group_dead) 823 kill_orphaned_pgrp(tsk->group_leader, NULL); 824 825 /* Let father know we died 826 * 827 * Thread signals are configurable, but you aren't going to use 828 * that to send signals to arbitrary processes. 829 * That stops right now. 830 * 831 * If the parent exec id doesn't match the exec id we saved 832 * when we started then we know the parent has changed security 833 * domain. 834 * 835 * If our self_exec id doesn't match our parent_exec_id then 836 * we have changed execution domain as these two values started 837 * the same after a fork. 838 */ 839 if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD && 840 (tsk->parent_exec_id != tsk->real_parent->self_exec_id || 841 tsk->self_exec_id != tsk->parent_exec_id)) 842 tsk->exit_signal = SIGCHLD; 843 844 if (unlikely(tsk->ptrace)) { 845 int sig = thread_group_leader(tsk) && 846 thread_group_empty(tsk) && 847 !ptrace_reparented(tsk) ? 848 tsk->exit_signal : SIGCHLD; 849 autoreap = do_notify_parent(tsk, sig); 850 } else if (thread_group_leader(tsk)) { 851 autoreap = thread_group_empty(tsk) && 852 do_notify_parent(tsk, tsk->exit_signal); 853 } else { 854 autoreap = true; 855 } 856 857 tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE; 858 859 /* mt-exec, de_thread() is waiting for group leader */ 860 if (unlikely(tsk->signal->notify_count < 0)) 861 wake_up_process(tsk->signal->group_exit_task); 862 write_unlock_irq(&tasklist_lock); 863 864 /* If the process is dead, release it - nobody will wait for it */ 865 if (autoreap) 866 release_task(tsk); 867 } 868 869 #ifdef CONFIG_DEBUG_STACK_USAGE 870 static void check_stack_usage(void) 871 { 872 static DEFINE_SPINLOCK(low_water_lock); 873 static int lowest_to_date = THREAD_SIZE; 874 unsigned long free; 875 876 free = stack_not_used(current); 877 878 if (free >= lowest_to_date) 879 return; 880 881 spin_lock(&low_water_lock); 882 if (free < lowest_to_date) { 883 printk(KERN_WARNING "%s used greatest stack depth: %lu bytes " 884 "left\n", 885 current->comm, free); 886 lowest_to_date = free; 887 } 888 spin_unlock(&low_water_lock); 889 } 890 #else 891 static inline void check_stack_usage(void) {} 892 #endif 893 894 NORET_TYPE void do_exit(long code) 895 { 896 struct task_struct *tsk = current; 897 int group_dead; 898 899 profile_task_exit(tsk); 900 901 WARN_ON(atomic_read(&tsk->fs_excl)); 902 WARN_ON(blk_needs_flush_plug(tsk)); 903 904 if (unlikely(in_interrupt())) 905 panic("Aiee, killing interrupt handler!"); 906 if (unlikely(!tsk->pid)) 907 panic("Attempted to kill the idle task!"); 908 909 /* 910 * If do_exit is called because this processes oopsed, it's possible 911 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before 912 * continuing. Amongst other possible reasons, this is to prevent 913 * mm_release()->clear_child_tid() from writing to a user-controlled 914 * kernel address. 915 */ 916 set_fs(USER_DS); 917 918 ptrace_event(PTRACE_EVENT_EXIT, code); 919 920 validate_creds_for_do_exit(tsk); 921 922 /* 923 * We're taking recursive faults here in do_exit. Safest is to just 924 * leave this task alone and wait for reboot. 925 */ 926 if (unlikely(tsk->flags & PF_EXITING)) { 927 printk(KERN_ALERT 928 "Fixing recursive fault but reboot is needed!\n"); 929 /* 930 * We can do this unlocked here. The futex code uses 931 * this flag just to verify whether the pi state 932 * cleanup has been done or not. In the worst case it 933 * loops once more. We pretend that the cleanup was 934 * done as there is no way to return. Either the 935 * OWNER_DIED bit is set by now or we push the blocked 936 * task into the wait for ever nirwana as well. 937 */ 938 tsk->flags |= PF_EXITPIDONE; 939 set_current_state(TASK_UNINTERRUPTIBLE); 940 schedule(); 941 } 942 943 exit_irq_thread(); 944 945 exit_signals(tsk); /* sets PF_EXITING */ 946 /* 947 * tsk->flags are checked in the futex code to protect against 948 * an exiting task cleaning up the robust pi futexes. 949 */ 950 smp_mb(); 951 raw_spin_unlock_wait(&tsk->pi_lock); 952 953 if (unlikely(in_atomic())) 954 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", 955 current->comm, task_pid_nr(current), 956 preempt_count()); 957 958 acct_update_integrals(tsk); 959 /* sync mm's RSS info before statistics gathering */ 960 if (tsk->mm) 961 sync_mm_rss(tsk, tsk->mm); 962 group_dead = atomic_dec_and_test(&tsk->signal->live); 963 if (group_dead) { 964 hrtimer_cancel(&tsk->signal->real_timer); 965 exit_itimers(tsk->signal); 966 if (tsk->mm) 967 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); 968 } 969 acct_collect(code, group_dead); 970 if (group_dead) 971 tty_audit_exit(); 972 if (unlikely(tsk->audit_context)) 973 audit_free(tsk); 974 975 tsk->exit_code = code; 976 taskstats_exit(tsk, group_dead); 977 978 exit_mm(tsk); 979 980 if (group_dead) 981 acct_process(); 982 trace_sched_process_exit(tsk); 983 984 exit_sem(tsk); 985 exit_files(tsk); 986 exit_fs(tsk); 987 check_stack_usage(); 988 exit_thread(); 989 990 /* 991 * Flush inherited counters to the parent - before the parent 992 * gets woken up by child-exit notifications. 993 * 994 * because of cgroup mode, must be called before cgroup_exit() 995 */ 996 perf_event_exit_task(tsk); 997 998 cgroup_exit(tsk, 1); 999 1000 if (group_dead) 1001 disassociate_ctty(1); 1002 1003 module_put(task_thread_info(tsk)->exec_domain->module); 1004 1005 proc_exit_connector(tsk); 1006 1007 /* 1008 * FIXME: do that only when needed, using sched_exit tracepoint 1009 */ 1010 ptrace_put_breakpoints(tsk); 1011 1012 exit_notify(tsk, group_dead); 1013 #ifdef CONFIG_NUMA 1014 task_lock(tsk); 1015 mpol_put(tsk->mempolicy); 1016 tsk->mempolicy = NULL; 1017 task_unlock(tsk); 1018 #endif 1019 #ifdef CONFIG_FUTEX 1020 if (unlikely(current->pi_state_cache)) 1021 kfree(current->pi_state_cache); 1022 #endif 1023 /* 1024 * Make sure we are holding no locks: 1025 */ 1026 debug_check_no_locks_held(tsk); 1027 /* 1028 * We can do this unlocked here. The futex code uses this flag 1029 * just to verify whether the pi state cleanup has been done 1030 * or not. In the worst case it loops once more. 1031 */ 1032 tsk->flags |= PF_EXITPIDONE; 1033 1034 if (tsk->io_context) 1035 exit_io_context(tsk); 1036 1037 if (tsk->splice_pipe) 1038 __free_pipe_info(tsk->splice_pipe); 1039 1040 validate_creds_for_do_exit(tsk); 1041 1042 preempt_disable(); 1043 exit_rcu(); 1044 /* causes final put_task_struct in finish_task_switch(). */ 1045 tsk->state = TASK_DEAD; 1046 schedule(); 1047 BUG(); 1048 /* Avoid "noreturn function does return". */ 1049 for (;;) 1050 cpu_relax(); /* For when BUG is null */ 1051 } 1052 1053 EXPORT_SYMBOL_GPL(do_exit); 1054 1055 NORET_TYPE void complete_and_exit(struct completion *comp, long code) 1056 { 1057 if (comp) 1058 complete(comp); 1059 1060 do_exit(code); 1061 } 1062 1063 EXPORT_SYMBOL(complete_and_exit); 1064 1065 SYSCALL_DEFINE1(exit, int, error_code) 1066 { 1067 do_exit((error_code&0xff)<<8); 1068 } 1069 1070 /* 1071 * Take down every thread in the group. This is called by fatal signals 1072 * as well as by sys_exit_group (below). 1073 */ 1074 NORET_TYPE void 1075 do_group_exit(int exit_code) 1076 { 1077 struct signal_struct *sig = current->signal; 1078 1079 BUG_ON(exit_code & 0x80); /* core dumps don't get here */ 1080 1081 if (signal_group_exit(sig)) 1082 exit_code = sig->group_exit_code; 1083 else if (!thread_group_empty(current)) { 1084 struct sighand_struct *const sighand = current->sighand; 1085 spin_lock_irq(&sighand->siglock); 1086 if (signal_group_exit(sig)) 1087 /* Another thread got here before we took the lock. */ 1088 exit_code = sig->group_exit_code; 1089 else { 1090 sig->group_exit_code = exit_code; 1091 sig->flags = SIGNAL_GROUP_EXIT; 1092 zap_other_threads(current); 1093 } 1094 spin_unlock_irq(&sighand->siglock); 1095 } 1096 1097 do_exit(exit_code); 1098 /* NOTREACHED */ 1099 } 1100 1101 /* 1102 * this kills every thread in the thread group. Note that any externally 1103 * wait4()-ing process will get the correct exit code - even if this 1104 * thread is not the thread group leader. 1105 */ 1106 SYSCALL_DEFINE1(exit_group, int, error_code) 1107 { 1108 do_group_exit((error_code & 0xff) << 8); 1109 /* NOTREACHED */ 1110 return 0; 1111 } 1112 1113 struct wait_opts { 1114 enum pid_type wo_type; 1115 int wo_flags; 1116 struct pid *wo_pid; 1117 1118 struct siginfo __user *wo_info; 1119 int __user *wo_stat; 1120 struct rusage __user *wo_rusage; 1121 1122 wait_queue_t child_wait; 1123 int notask_error; 1124 }; 1125 1126 static inline 1127 struct pid *task_pid_type(struct task_struct *task, enum pid_type type) 1128 { 1129 if (type != PIDTYPE_PID) 1130 task = task->group_leader; 1131 return task->pids[type].pid; 1132 } 1133 1134 static int eligible_pid(struct wait_opts *wo, struct task_struct *p) 1135 { 1136 return wo->wo_type == PIDTYPE_MAX || 1137 task_pid_type(p, wo->wo_type) == wo->wo_pid; 1138 } 1139 1140 static int eligible_child(struct wait_opts *wo, struct task_struct *p) 1141 { 1142 if (!eligible_pid(wo, p)) 1143 return 0; 1144 /* Wait for all children (clone and not) if __WALL is set; 1145 * otherwise, wait for clone children *only* if __WCLONE is 1146 * set; otherwise, wait for non-clone children *only*. (Note: 1147 * A "clone" child here is one that reports to its parent 1148 * using a signal other than SIGCHLD.) */ 1149 if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) 1150 && !(wo->wo_flags & __WALL)) 1151 return 0; 1152 1153 return 1; 1154 } 1155 1156 static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p, 1157 pid_t pid, uid_t uid, int why, int status) 1158 { 1159 struct siginfo __user *infop; 1160 int retval = wo->wo_rusage 1161 ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; 1162 1163 put_task_struct(p); 1164 infop = wo->wo_info; 1165 if (infop) { 1166 if (!retval) 1167 retval = put_user(SIGCHLD, &infop->si_signo); 1168 if (!retval) 1169 retval = put_user(0, &infop->si_errno); 1170 if (!retval) 1171 retval = put_user((short)why, &infop->si_code); 1172 if (!retval) 1173 retval = put_user(pid, &infop->si_pid); 1174 if (!retval) 1175 retval = put_user(uid, &infop->si_uid); 1176 if (!retval) 1177 retval = put_user(status, &infop->si_status); 1178 } 1179 if (!retval) 1180 retval = pid; 1181 return retval; 1182 } 1183 1184 /* 1185 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold 1186 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold 1187 * the lock and this task is uninteresting. If we return nonzero, we have 1188 * released the lock and the system call should return. 1189 */ 1190 static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) 1191 { 1192 unsigned long state; 1193 int retval, status, traced; 1194 pid_t pid = task_pid_vnr(p); 1195 uid_t uid = __task_cred(p)->uid; 1196 struct siginfo __user *infop; 1197 1198 if (!likely(wo->wo_flags & WEXITED)) 1199 return 0; 1200 1201 if (unlikely(wo->wo_flags & WNOWAIT)) { 1202 int exit_code = p->exit_code; 1203 int why; 1204 1205 get_task_struct(p); 1206 read_unlock(&tasklist_lock); 1207 if ((exit_code & 0x7f) == 0) { 1208 why = CLD_EXITED; 1209 status = exit_code >> 8; 1210 } else { 1211 why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED; 1212 status = exit_code & 0x7f; 1213 } 1214 return wait_noreap_copyout(wo, p, pid, uid, why, status); 1215 } 1216 1217 /* 1218 * Try to move the task's state to DEAD 1219 * only one thread is allowed to do this: 1220 */ 1221 state = xchg(&p->exit_state, EXIT_DEAD); 1222 if (state != EXIT_ZOMBIE) { 1223 BUG_ON(state != EXIT_DEAD); 1224 return 0; 1225 } 1226 1227 traced = ptrace_reparented(p); 1228 /* 1229 * It can be ptraced but not reparented, check 1230 * thread_group_leader() to filter out sub-threads. 1231 */ 1232 if (likely(!traced) && thread_group_leader(p)) { 1233 struct signal_struct *psig; 1234 struct signal_struct *sig; 1235 unsigned long maxrss; 1236 cputime_t tgutime, tgstime; 1237 1238 /* 1239 * The resource counters for the group leader are in its 1240 * own task_struct. Those for dead threads in the group 1241 * are in its signal_struct, as are those for the child 1242 * processes it has previously reaped. All these 1243 * accumulate in the parent's signal_struct c* fields. 1244 * 1245 * We don't bother to take a lock here to protect these 1246 * p->signal fields, because they are only touched by 1247 * __exit_signal, which runs with tasklist_lock 1248 * write-locked anyway, and so is excluded here. We do 1249 * need to protect the access to parent->signal fields, 1250 * as other threads in the parent group can be right 1251 * here reaping other children at the same time. 1252 * 1253 * We use thread_group_times() to get times for the thread 1254 * group, which consolidates times for all threads in the 1255 * group including the group leader. 1256 */ 1257 thread_group_times(p, &tgutime, &tgstime); 1258 spin_lock_irq(&p->real_parent->sighand->siglock); 1259 psig = p->real_parent->signal; 1260 sig = p->signal; 1261 psig->cutime = 1262 cputime_add(psig->cutime, 1263 cputime_add(tgutime, 1264 sig->cutime)); 1265 psig->cstime = 1266 cputime_add(psig->cstime, 1267 cputime_add(tgstime, 1268 sig->cstime)); 1269 psig->cgtime = 1270 cputime_add(psig->cgtime, 1271 cputime_add(p->gtime, 1272 cputime_add(sig->gtime, 1273 sig->cgtime))); 1274 psig->cmin_flt += 1275 p->min_flt + sig->min_flt + sig->cmin_flt; 1276 psig->cmaj_flt += 1277 p->maj_flt + sig->maj_flt + sig->cmaj_flt; 1278 psig->cnvcsw += 1279 p->nvcsw + sig->nvcsw + sig->cnvcsw; 1280 psig->cnivcsw += 1281 p->nivcsw + sig->nivcsw + sig->cnivcsw; 1282 psig->cinblock += 1283 task_io_get_inblock(p) + 1284 sig->inblock + sig->cinblock; 1285 psig->coublock += 1286 task_io_get_oublock(p) + 1287 sig->oublock + sig->coublock; 1288 maxrss = max(sig->maxrss, sig->cmaxrss); 1289 if (psig->cmaxrss < maxrss) 1290 psig->cmaxrss = maxrss; 1291 task_io_accounting_add(&psig->ioac, &p->ioac); 1292 task_io_accounting_add(&psig->ioac, &sig->ioac); 1293 spin_unlock_irq(&p->real_parent->sighand->siglock); 1294 } 1295 1296 /* 1297 * Now we are sure this task is interesting, and no other 1298 * thread can reap it because we set its state to EXIT_DEAD. 1299 */ 1300 read_unlock(&tasklist_lock); 1301 1302 retval = wo->wo_rusage 1303 ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; 1304 status = (p->signal->flags & SIGNAL_GROUP_EXIT) 1305 ? p->signal->group_exit_code : p->exit_code; 1306 if (!retval && wo->wo_stat) 1307 retval = put_user(status, wo->wo_stat); 1308 1309 infop = wo->wo_info; 1310 if (!retval && infop) 1311 retval = put_user(SIGCHLD, &infop->si_signo); 1312 if (!retval && infop) 1313 retval = put_user(0, &infop->si_errno); 1314 if (!retval && infop) { 1315 int why; 1316 1317 if ((status & 0x7f) == 0) { 1318 why = CLD_EXITED; 1319 status >>= 8; 1320 } else { 1321 why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; 1322 status &= 0x7f; 1323 } 1324 retval = put_user((short)why, &infop->si_code); 1325 if (!retval) 1326 retval = put_user(status, &infop->si_status); 1327 } 1328 if (!retval && infop) 1329 retval = put_user(pid, &infop->si_pid); 1330 if (!retval && infop) 1331 retval = put_user(uid, &infop->si_uid); 1332 if (!retval) 1333 retval = pid; 1334 1335 if (traced) { 1336 write_lock_irq(&tasklist_lock); 1337 /* We dropped tasklist, ptracer could die and untrace */ 1338 ptrace_unlink(p); 1339 /* 1340 * If this is not a sub-thread, notify the parent. 1341 * If parent wants a zombie, don't release it now. 1342 */ 1343 if (thread_group_leader(p) && 1344 !do_notify_parent(p, p->exit_signal)) { 1345 p->exit_state = EXIT_ZOMBIE; 1346 p = NULL; 1347 } 1348 write_unlock_irq(&tasklist_lock); 1349 } 1350 if (p != NULL) 1351 release_task(p); 1352 1353 return retval; 1354 } 1355 1356 static int *task_stopped_code(struct task_struct *p, bool ptrace) 1357 { 1358 if (ptrace) { 1359 if (task_is_stopped_or_traced(p) && 1360 !(p->jobctl & JOBCTL_LISTENING)) 1361 return &p->exit_code; 1362 } else { 1363 if (p->signal->flags & SIGNAL_STOP_STOPPED) 1364 return &p->signal->group_exit_code; 1365 } 1366 return NULL; 1367 } 1368 1369 /** 1370 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED 1371 * @wo: wait options 1372 * @ptrace: is the wait for ptrace 1373 * @p: task to wait for 1374 * 1375 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED. 1376 * 1377 * CONTEXT: 1378 * read_lock(&tasklist_lock), which is released if return value is 1379 * non-zero. Also, grabs and releases @p->sighand->siglock. 1380 * 1381 * RETURNS: 1382 * 0 if wait condition didn't exist and search for other wait conditions 1383 * should continue. Non-zero return, -errno on failure and @p's pid on 1384 * success, implies that tasklist_lock is released and wait condition 1385 * search should terminate. 1386 */ 1387 static int wait_task_stopped(struct wait_opts *wo, 1388 int ptrace, struct task_struct *p) 1389 { 1390 struct siginfo __user *infop; 1391 int retval, exit_code, *p_code, why; 1392 uid_t uid = 0; /* unneeded, required by compiler */ 1393 pid_t pid; 1394 1395 /* 1396 * Traditionally we see ptrace'd stopped tasks regardless of options. 1397 */ 1398 if (!ptrace && !(wo->wo_flags & WUNTRACED)) 1399 return 0; 1400 1401 if (!task_stopped_code(p, ptrace)) 1402 return 0; 1403 1404 exit_code = 0; 1405 spin_lock_irq(&p->sighand->siglock); 1406 1407 p_code = task_stopped_code(p, ptrace); 1408 if (unlikely(!p_code)) 1409 goto unlock_sig; 1410 1411 exit_code = *p_code; 1412 if (!exit_code) 1413 goto unlock_sig; 1414 1415 if (!unlikely(wo->wo_flags & WNOWAIT)) 1416 *p_code = 0; 1417 1418 uid = task_uid(p); 1419 unlock_sig: 1420 spin_unlock_irq(&p->sighand->siglock); 1421 if (!exit_code) 1422 return 0; 1423 1424 /* 1425 * Now we are pretty sure this task is interesting. 1426 * Make sure it doesn't get reaped out from under us while we 1427 * give up the lock and then examine it below. We don't want to 1428 * keep holding onto the tasklist_lock while we call getrusage and 1429 * possibly take page faults for user memory. 1430 */ 1431 get_task_struct(p); 1432 pid = task_pid_vnr(p); 1433 why = ptrace ? CLD_TRAPPED : CLD_STOPPED; 1434 read_unlock(&tasklist_lock); 1435 1436 if (unlikely(wo->wo_flags & WNOWAIT)) 1437 return wait_noreap_copyout(wo, p, pid, uid, why, exit_code); 1438 1439 retval = wo->wo_rusage 1440 ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; 1441 if (!retval && wo->wo_stat) 1442 retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat); 1443 1444 infop = wo->wo_info; 1445 if (!retval && infop) 1446 retval = put_user(SIGCHLD, &infop->si_signo); 1447 if (!retval && infop) 1448 retval = put_user(0, &infop->si_errno); 1449 if (!retval && infop) 1450 retval = put_user((short)why, &infop->si_code); 1451 if (!retval && infop) 1452 retval = put_user(exit_code, &infop->si_status); 1453 if (!retval && infop) 1454 retval = put_user(pid, &infop->si_pid); 1455 if (!retval && infop) 1456 retval = put_user(uid, &infop->si_uid); 1457 if (!retval) 1458 retval = pid; 1459 put_task_struct(p); 1460 1461 BUG_ON(!retval); 1462 return retval; 1463 } 1464 1465 /* 1466 * Handle do_wait work for one task in a live, non-stopped state. 1467 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold 1468 * the lock and this task is uninteresting. If we return nonzero, we have 1469 * released the lock and the system call should return. 1470 */ 1471 static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) 1472 { 1473 int retval; 1474 pid_t pid; 1475 uid_t uid; 1476 1477 if (!unlikely(wo->wo_flags & WCONTINUED)) 1478 return 0; 1479 1480 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) 1481 return 0; 1482 1483 spin_lock_irq(&p->sighand->siglock); 1484 /* Re-check with the lock held. */ 1485 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { 1486 spin_unlock_irq(&p->sighand->siglock); 1487 return 0; 1488 } 1489 if (!unlikely(wo->wo_flags & WNOWAIT)) 1490 p->signal->flags &= ~SIGNAL_STOP_CONTINUED; 1491 uid = task_uid(p); 1492 spin_unlock_irq(&p->sighand->siglock); 1493 1494 pid = task_pid_vnr(p); 1495 get_task_struct(p); 1496 read_unlock(&tasklist_lock); 1497 1498 if (!wo->wo_info) { 1499 retval = wo->wo_rusage 1500 ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; 1501 put_task_struct(p); 1502 if (!retval && wo->wo_stat) 1503 retval = put_user(0xffff, wo->wo_stat); 1504 if (!retval) 1505 retval = pid; 1506 } else { 1507 retval = wait_noreap_copyout(wo, p, pid, uid, 1508 CLD_CONTINUED, SIGCONT); 1509 BUG_ON(retval == 0); 1510 } 1511 1512 return retval; 1513 } 1514 1515 /* 1516 * Consider @p for a wait by @parent. 1517 * 1518 * -ECHILD should be in ->notask_error before the first call. 1519 * Returns nonzero for a final return, when we have unlocked tasklist_lock. 1520 * Returns zero if the search for a child should continue; 1521 * then ->notask_error is 0 if @p is an eligible child, 1522 * or another error from security_task_wait(), or still -ECHILD. 1523 */ 1524 static int wait_consider_task(struct wait_opts *wo, int ptrace, 1525 struct task_struct *p) 1526 { 1527 int ret = eligible_child(wo, p); 1528 if (!ret) 1529 return ret; 1530 1531 ret = security_task_wait(p); 1532 if (unlikely(ret < 0)) { 1533 /* 1534 * If we have not yet seen any eligible child, 1535 * then let this error code replace -ECHILD. 1536 * A permission error will give the user a clue 1537 * to look for security policy problems, rather 1538 * than for mysterious wait bugs. 1539 */ 1540 if (wo->notask_error) 1541 wo->notask_error = ret; 1542 return 0; 1543 } 1544 1545 /* dead body doesn't have much to contribute */ 1546 if (p->exit_state == EXIT_DEAD) 1547 return 0; 1548 1549 /* slay zombie? */ 1550 if (p->exit_state == EXIT_ZOMBIE) { 1551 /* 1552 * A zombie ptracee is only visible to its ptracer. 1553 * Notification and reaping will be cascaded to the real 1554 * parent when the ptracer detaches. 1555 */ 1556 if (likely(!ptrace) && unlikely(p->ptrace)) { 1557 /* it will become visible, clear notask_error */ 1558 wo->notask_error = 0; 1559 return 0; 1560 } 1561 1562 /* we don't reap group leaders with subthreads */ 1563 if (!delay_group_leader(p)) 1564 return wait_task_zombie(wo, p); 1565 1566 /* 1567 * Allow access to stopped/continued state via zombie by 1568 * falling through. Clearing of notask_error is complex. 1569 * 1570 * When !@ptrace: 1571 * 1572 * If WEXITED is set, notask_error should naturally be 1573 * cleared. If not, subset of WSTOPPED|WCONTINUED is set, 1574 * so, if there are live subthreads, there are events to 1575 * wait for. If all subthreads are dead, it's still safe 1576 * to clear - this function will be called again in finite 1577 * amount time once all the subthreads are released and 1578 * will then return without clearing. 1579 * 1580 * When @ptrace: 1581 * 1582 * Stopped state is per-task and thus can't change once the 1583 * target task dies. Only continued and exited can happen. 1584 * Clear notask_error if WCONTINUED | WEXITED. 1585 */ 1586 if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) 1587 wo->notask_error = 0; 1588 } else { 1589 /* 1590 * If @p is ptraced by a task in its real parent's group, 1591 * hide group stop/continued state when looking at @p as 1592 * the real parent; otherwise, a single stop can be 1593 * reported twice as group and ptrace stops. 1594 * 1595 * If a ptracer wants to distinguish the two events for its 1596 * own children, it should create a separate process which 1597 * takes the role of real parent. 1598 */ 1599 if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p)) 1600 return 0; 1601 1602 /* 1603 * @p is alive and it's gonna stop, continue or exit, so 1604 * there always is something to wait for. 1605 */ 1606 wo->notask_error = 0; 1607 } 1608 1609 /* 1610 * Wait for stopped. Depending on @ptrace, different stopped state 1611 * is used and the two don't interact with each other. 1612 */ 1613 ret = wait_task_stopped(wo, ptrace, p); 1614 if (ret) 1615 return ret; 1616 1617 /* 1618 * Wait for continued. There's only one continued state and the 1619 * ptracer can consume it which can confuse the real parent. Don't 1620 * use WCONTINUED from ptracer. You don't need or want it. 1621 */ 1622 return wait_task_continued(wo, p); 1623 } 1624 1625 /* 1626 * Do the work of do_wait() for one thread in the group, @tsk. 1627 * 1628 * -ECHILD should be in ->notask_error before the first call. 1629 * Returns nonzero for a final return, when we have unlocked tasklist_lock. 1630 * Returns zero if the search for a child should continue; then 1631 * ->notask_error is 0 if there were any eligible children, 1632 * or another error from security_task_wait(), or still -ECHILD. 1633 */ 1634 static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) 1635 { 1636 struct task_struct *p; 1637 1638 list_for_each_entry(p, &tsk->children, sibling) { 1639 int ret = wait_consider_task(wo, 0, p); 1640 if (ret) 1641 return ret; 1642 } 1643 1644 return 0; 1645 } 1646 1647 static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) 1648 { 1649 struct task_struct *p; 1650 1651 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { 1652 int ret = wait_consider_task(wo, 1, p); 1653 if (ret) 1654 return ret; 1655 } 1656 1657 return 0; 1658 } 1659 1660 static int child_wait_callback(wait_queue_t *wait, unsigned mode, 1661 int sync, void *key) 1662 { 1663 struct wait_opts *wo = container_of(wait, struct wait_opts, 1664 child_wait); 1665 struct task_struct *p = key; 1666 1667 if (!eligible_pid(wo, p)) 1668 return 0; 1669 1670 if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent) 1671 return 0; 1672 1673 return default_wake_function(wait, mode, sync, key); 1674 } 1675 1676 void __wake_up_parent(struct task_struct *p, struct task_struct *parent) 1677 { 1678 __wake_up_sync_key(&parent->signal->wait_chldexit, 1679 TASK_INTERRUPTIBLE, 1, p); 1680 } 1681 1682 static long do_wait(struct wait_opts *wo) 1683 { 1684 struct task_struct *tsk; 1685 int retval; 1686 1687 trace_sched_process_wait(wo->wo_pid); 1688 1689 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback); 1690 wo->child_wait.private = current; 1691 add_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); 1692 repeat: 1693 /* 1694 * If there is nothing that can match our critiera just get out. 1695 * We will clear ->notask_error to zero if we see any child that 1696 * might later match our criteria, even if we are not able to reap 1697 * it yet. 1698 */ 1699 wo->notask_error = -ECHILD; 1700 if ((wo->wo_type < PIDTYPE_MAX) && 1701 (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type]))) 1702 goto notask; 1703 1704 set_current_state(TASK_INTERRUPTIBLE); 1705 read_lock(&tasklist_lock); 1706 tsk = current; 1707 do { 1708 retval = do_wait_thread(wo, tsk); 1709 if (retval) 1710 goto end; 1711 1712 retval = ptrace_do_wait(wo, tsk); 1713 if (retval) 1714 goto end; 1715 1716 if (wo->wo_flags & __WNOTHREAD) 1717 break; 1718 } while_each_thread(current, tsk); 1719 read_unlock(&tasklist_lock); 1720 1721 notask: 1722 retval = wo->notask_error; 1723 if (!retval && !(wo->wo_flags & WNOHANG)) { 1724 retval = -ERESTARTSYS; 1725 if (!signal_pending(current)) { 1726 schedule(); 1727 goto repeat; 1728 } 1729 } 1730 end: 1731 __set_current_state(TASK_RUNNING); 1732 remove_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); 1733 return retval; 1734 } 1735 1736 SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, 1737 infop, int, options, struct rusage __user *, ru) 1738 { 1739 struct wait_opts wo; 1740 struct pid *pid = NULL; 1741 enum pid_type type; 1742 long ret; 1743 1744 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED)) 1745 return -EINVAL; 1746 if (!(options & (WEXITED|WSTOPPED|WCONTINUED))) 1747 return -EINVAL; 1748 1749 switch (which) { 1750 case P_ALL: 1751 type = PIDTYPE_MAX; 1752 break; 1753 case P_PID: 1754 type = PIDTYPE_PID; 1755 if (upid <= 0) 1756 return -EINVAL; 1757 break; 1758 case P_PGID: 1759 type = PIDTYPE_PGID; 1760 if (upid <= 0) 1761 return -EINVAL; 1762 break; 1763 default: 1764 return -EINVAL; 1765 } 1766 1767 if (type < PIDTYPE_MAX) 1768 pid = find_get_pid(upid); 1769 1770 wo.wo_type = type; 1771 wo.wo_pid = pid; 1772 wo.wo_flags = options; 1773 wo.wo_info = infop; 1774 wo.wo_stat = NULL; 1775 wo.wo_rusage = ru; 1776 ret = do_wait(&wo); 1777 1778 if (ret > 0) { 1779 ret = 0; 1780 } else if (infop) { 1781 /* 1782 * For a WNOHANG return, clear out all the fields 1783 * we would set so the user can easily tell the 1784 * difference. 1785 */ 1786 if (!ret) 1787 ret = put_user(0, &infop->si_signo); 1788 if (!ret) 1789 ret = put_user(0, &infop->si_errno); 1790 if (!ret) 1791 ret = put_user(0, &infop->si_code); 1792 if (!ret) 1793 ret = put_user(0, &infop->si_pid); 1794 if (!ret) 1795 ret = put_user(0, &infop->si_uid); 1796 if (!ret) 1797 ret = put_user(0, &infop->si_status); 1798 } 1799 1800 put_pid(pid); 1801 1802 /* avoid REGPARM breakage on x86: */ 1803 asmlinkage_protect(5, ret, which, upid, infop, options, ru); 1804 return ret; 1805 } 1806 1807 SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, 1808 int, options, struct rusage __user *, ru) 1809 { 1810 struct wait_opts wo; 1811 struct pid *pid = NULL; 1812 enum pid_type type; 1813 long ret; 1814 1815 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| 1816 __WNOTHREAD|__WCLONE|__WALL)) 1817 return -EINVAL; 1818 1819 if (upid == -1) 1820 type = PIDTYPE_MAX; 1821 else if (upid < 0) { 1822 type = PIDTYPE_PGID; 1823 pid = find_get_pid(-upid); 1824 } else if (upid == 0) { 1825 type = PIDTYPE_PGID; 1826 pid = get_task_pid(current, PIDTYPE_PGID); 1827 } else /* upid > 0 */ { 1828 type = PIDTYPE_PID; 1829 pid = find_get_pid(upid); 1830 } 1831 1832 wo.wo_type = type; 1833 wo.wo_pid = pid; 1834 wo.wo_flags = options | WEXITED; 1835 wo.wo_info = NULL; 1836 wo.wo_stat = stat_addr; 1837 wo.wo_rusage = ru; 1838 ret = do_wait(&wo); 1839 put_pid(pid); 1840 1841 /* avoid REGPARM breakage on x86: */ 1842 asmlinkage_protect(4, ret, upid, stat_addr, options, ru); 1843 return ret; 1844 } 1845 1846 #ifdef __ARCH_WANT_SYS_WAITPID 1847 1848 /* 1849 * sys_waitpid() remains for compatibility. waitpid() should be 1850 * implemented by calling sys_wait4() from libc.a. 1851 */ 1852 SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options) 1853 { 1854 return sys_wait4(pid, stat_addr, options, NULL); 1855 } 1856 1857 #endif 1858