/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

static void exit_mm(struct task_struct *tsk);

static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
}

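/*
 * Once __unhash_process() has run, the pid no longer resolves to this
 * task: pid_task(pid, PIDTYPE_PID) - and hence find_task_by_vpid() -
 * returns NULL, even though the task_struct itself is only freed later
 * via release_task()/delayed_put_task_struct().
 */
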
/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *uninitialized_var(tty);

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (group_dead) {
		posix_cpu_timers_exit_group(tsk);
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * This can only happen if the caller is de_thread().
		 * FIXME: this is the temporary hack, we should teach
		 * posix-cpu-timers to handle this case correctly.
		 */
		if (unlikely(has_group_leader_pid(tsk)))
			posix_cpu_timers_exit_group(tsk);

		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.  The
		 * group leader stays around as a zombie as long as there
		 * are other threads.  When it gets reaped, the exit.c
		 * code will add its counts into these totals.  We won't
		 * ever get here for the group leader, since it will have
		 * been the last reference on the signal_struct.
		 */
		sig->utime += tsk->utime;
		sig->stime += tsk->stime;
		sig->gtime += tsk->gtime;
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		task_io_accounting_add(&sig->ioac, &tsk->ioac);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	}

	sig->nr_threads--;
	__unhash_process(tsk, group_dead);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}


void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	proc_flush_task(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) &&
	    leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

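/*
 * A concrete example of the orphaned-pgrp rules below: a shell runs a
 * pipeline in its own process group and then exits.  If no remaining
 * member of that group has a parent in a different group of the same
 * session, the group is orphaned; any stopped members would otherwise
 * sleep forever, so the group must get SIGHUP and then SIGCONT.
 */
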
/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
				     struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * Various pieces of task state, such as the scheduling policy and
 * priority, may have been inherited from a user process, so we reset
 * them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to kthreadd */
	current->real_parent = current->parent = kthreadd_task;
	list_move_tail(&current->sibling, &current->real_parent->children);

	/* Set the exit signal to SIGCHLD so we signal our new parent on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));

	atomic_inc(&init_cred.usage);
	commit_creds(&init_cred);
	write_unlock_irq(&tasklist_lock);
}

void __set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

static void set_special_pids(struct pid *pid)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(pid);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they allow a certain signal.
 * Must not be used if kthread was cloned with CLONE_SIGHAND.
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	/* This is only needed for daemonize()'ed kthreads */
	sigdelset(&current->blocked, sig);
	/*
	 * Kernel threads handle their own signals. Let the signal code
	 * know it'll be handled, so that they don't get converted to
	 * SIGKILL or just silently dropped.
	 */
	current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to get frozen, in case system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= (PF_NOFREEZE | PF_KTHREAD);

	if (current->nsproxy != &init_nsproxy) {
		get_nsproxy(&init_nsproxy);
		switch_task_namespaces(current, &init_nsproxy);
	}
	set_special_pids(&init_struct_pid);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	daemonize_fs_struct();
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);

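/*
 * A sketch of the (legacy) calling convention for the two helpers
 * above, as used by pre-kthread-API module threads; "mydaemon" and
 * do_work() are placeholders:
 *
 *	daemonize("mydaemon");		// shed user-space resources
 *	allow_signal(SIGKILL);		// opt back in to one signal
 *	while (!signal_pending(current))
 *		do_work();
 */
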
static void close_files(struct files_struct *files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.  But use RCU to shut RCU-lockdep up.
	 */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file *file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

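/*
 * close_files() walks the open-fd bitmap one long at a time: word j
 * covers descriptors [j * __NFDBITS, (j + 1) * __NFDBITS).  With
 * 64-bit longs, for example, fd 70 is bit 6 of word 1; each set bit
 * means fdt->fd[i] holds an open file that must be closed.
 */
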
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		rcu_read_lock();
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
		rcu_read_unlock();
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

#ifdef CONFIG_MM_OWNER
/*
 * A task is exiting.  If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		mm->owner = NULL;
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else. We should not get
	 * here often
	 */
	do_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;
	} while_each_thread(g, c);

	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;
		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
}

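/*
 * Coredump handshake in exit_mm() above: if another thread is dumping
 * core (mm->core_state is set), the exiting thread queues itself on
 * core_state->dumper, completes ->startup when it is the last thread
 * to arrive, and then sleeps until coredump_finish() clears self.task.
 * Only after that is tsk->mm cleared.
 */
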
/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give them to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give them to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *thread;

	thread = father;
	while_each_thread(father, thread) {
		if (thread->flags & PF_EXITING)
			continue;
		if (unlikely(pid_ns->child_reaper == father))
			pid_ns->child_reaper = thread;
		return thread;
	}

	if (unlikely(pid_ns->child_reaper == father)) {
		write_unlock_irq(&tasklist_lock);
		if (unlikely(pid_ns == &init_pid_ns)) {
			panic("Attempted to kill init! exitcode=0x%08x\n",
				father->signal->group_exit_code ?:
					father->exit_code);
		}

		zap_pid_ns_processes(pid_ns);
		write_lock_irq(&tasklist_lock);
		/*
		 * We cannot clear ->child_reaper or leave it alone.
		 * There may be stealth EXIT_DEAD tasks on ->children,
		 * forget_original_parent() must move them somewhere.
		 */
		pid_ns->child_reaper = init_pid_ns.child_reaper;
	} else if (father->signal->has_child_subreaper) {
		struct task_struct *reaper;

		/*
		 * Find the first ancestor marked as child_subreaper.
		 * Note that the code below checks same_thread_group(reaper,
		 * pid_ns->child_reaper).  This is what we need to DTRT in a
		 * PID namespace. However we still need the check above, see
		 * http://marc.info/?l=linux-kernel&m=131385460420380
		 */
		for (reaper = father->real_parent;
		     reaper != &init_task;
		     reaper = reaper->real_parent) {
			if (same_thread_group(reaper, pid_ns->child_reaper))
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = reaper;
			do {
				if (!(thread->flags & PF_EXITING))
					return reaper;
			} while_each_thread(reaper, thread);
		}
	}

	return pid_ns->child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	list_move_tail(&p->sibling, &p->real_parent->children);

	if (p->exit_state == EXIT_DEAD)
		return;
	/*
	 * If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_move_tail(&p->sibling, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper;
	LIST_HEAD(dead_children);

	write_lock_irq(&tasklist_lock);
	/*
	 * Note that exit_ptrace() and find_new_reaper() might
	 * drop tasklist_lock and reacquire it.
	 */
	exit_ptrace(father);
	reaper = find_new_reaper(father);

	list_for_each_entry_safe(p, n, &father->children, sibling) {
		struct task_struct *t = p;
		do {
			t->real_parent = reaper;
			if (t->parent == father) {
				BUG_ON(t->ptrace);
				t->parent = t->real_parent;
			}
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t);
		} while_each_thread(p, t);
		reparent_leader(father, p, &dead_children);
	}
	write_unlock_irq(&tasklist_lock);

	BUG_ON(!list_empty(&father->children));

	list_for_each_entry_safe(p, n, &dead_children, sibling) {
		list_del_init(&p->sibling);
		release_task(p);
	}
}

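/*
 * Reparenting example: a service manager can mark itself with
 * prctl(PR_SET_CHILD_SUBREAPER, 1).  When the parent of one of its
 * double-forked descendants dies, find_new_reaper() walks up the
 * real_parent chain and hands the orphan to the subreaper instead of
 * init, so the manager still gets the SIGCHLD and can wait() on it.
 */
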
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			   do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	/* If the process is dead, release it - nobody will wait for it */
	if (autoreap)
		release_task(tsk);
}

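/*
 * Autoreap example: if the parent has set SIGCHLD to SIG_IGN or uses
 * SA_NOCLDWAIT, do_notify_parent() returns true, exit_notify() sets
 * EXIT_DEAD and the task releases itself - it never becomes a zombie
 * and the parent's wait() will not see it.
 */
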
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	/*
	 * If do_exit is called because this process oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	set_fs(USER_DS);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */
	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	exit_irq_thread();

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);
	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	check_stack_usage();
	exit_thread();

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * Because of cgroup mode, this must be called before cgroup_exit().
	 */
	perf_event_exit_task(tsk);

	cgroup_exit(tsk, 1);

	if (group_dead)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);

	proc_exit_connector(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	ptrace_put_breakpoints(tsk);

	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	task_lock(tsk);
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
	task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	validate_creds_for_do_exit(tsk);

	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();

	/*
	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
	 * when the following two conditions become true:
	 *   - there is a race condition on mmap_sem (it is acquired by
	 *     exit_mm()), and
	 *   - an SMI occurs before setting TASK_RUNNING (or the hypervisor
	 *     of a virtual machine switches to another guest).
	 * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
	 *
	 * To avoid this, we have to wait until tsk->pi_lock, which is held
	 * by try_to_wake_up(), is released.
	 */
	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

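/*
 * Note that do_exit() never returns: once tsk->state is TASK_DEAD the
 * final schedule() switches away for good, and finish_task_switch()
 * running in the next task drops the scheduler's last reference with
 * put_task_struct().
 */
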
void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code & 0xff) << 8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

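/*
 * Exit-code layout as later consumed by the wait*() family: bits 8-15
 * carry the exit status, bits 0-6 the terminating signal, bit 7 the
 * core-dump flag.  For example, exit(3) ends up as an exit code of
 * 0x300 (WEXITSTATUS == 3), while a SIGSEGV death with a core dump
 * yields 0x8b (11 | 0x80): WTERMSIG == 11 and WCOREDUMP is set.
 */
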
struct wait_opts {
	enum pid_type		wo_type;
	int			wo_flags;
	struct pid		*wo_pid;

	struct siginfo __user	*wo_info;
	int __user		*wo_stat;
	struct rusage __user	*wo_rusage;

	wait_queue_t		child_wait;
	int			notask_error;
};

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	return task->pids[type].pid;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;
	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
	    && !(wo->wo_flags & __WALL))
		return 0;

	return 1;
}

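/*
 * "Clone child" example: a child created with an exit signal other
 * than SIGCHLD (e.g. clone() with 0 in the low flag bits) is skipped
 * by a plain wait4(); the caller must pass __WCLONE to see it, or
 * __WALL to see both kinds of children.
 */
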
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
				pid_t pid, uid_t uid, int why, int status)
{
	struct siginfo __user *infop;
	int retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

	put_task_struct(p);
	infop = wo->wo_info;
	if (infop) {
		if (!retval)
			retval = put_user(SIGCHLD, &infop->si_signo);
		if (!retval)
			retval = put_user(0, &infop->si_errno);
		if (!retval)
			retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(pid, &infop->si_pid);
		if (!retval)
			retval = put_user(uid, &infop->si_uid);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	unsigned long state;
	int retval, status, traced;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = __task_cred(p)->uid;
	struct siginfo __user *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		int exit_code = p->exit_code;
		int why;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(wo, p, pid, uid, why, status);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	traced = ptrace_reparented(p);
	/*
	 * It can be ptraced but not reparented, check
	 * thread_group_leader() to filter out sub-threads.
	 */
	if (likely(!traced) && thread_group_leader(p)) {
		struct signal_struct *psig;
		struct signal_struct *sig;
		unsigned long maxrss;
		cputime_t tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 *
		 * We use thread_group_times() to get times for the thread
		 * group, which consolidates times for all threads in the
		 * group including the group leader.
		 */
		thread_group_times(p, &tgutime, &tgstime);
		spin_lock_irq(&p->real_parent->sighand->siglock);
		psig = p->real_parent->signal;
		sig = p->signal;
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		spin_unlock_irq(&p->real_parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && wo->wo_stat)
		retval = put_user(status, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a sub-thread, notify the parent.
		 * If parent wants a zombie, don't release it now.
		 */
		if (thread_group_leader(p) &&
		    !do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_ZOMBIE;
			p = NULL;
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}

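/*
 * The 'traced' tail above handles a zombie that was reaped through the
 * ptracer: after ptrace_unlink(), if the real parent still wants a
 * zombie (do_notify_parent() returns false), exit_state is flipped
 * back to EXIT_ZOMBIE and release_task() is left to the real parent.
 */
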
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_stopped_or_traced(p) &&
		    !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct siginfo __user *infop;
	int retval, exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = task_uid(p);
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);

	if (unlikely(wo->wo_flags & WNOWAIT))
		return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	if (!retval && wo->wo_stat)
		retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

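/*
 * Stopped status as seen by user space: wait_task_stopped() reports
 * (exit_code << 8) | 0x7f.  For a SIGTSTP stop, for example, the raw
 * status is 0x147f, so WIFSTOPPED() is true (low byte 0x7f) and
 * WSTOPSIG() == 20.
 */
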
/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = task_uid(p);
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!wo->wo_info) {
		retval = wo->wo_rusage
			? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
		put_task_struct(p);
		if (!retval && wo->wo_stat)
			retval = put_user(0xffff, wo->wo_stat);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(wo, p, pid, uid,
					     CLD_CONTINUED, SIGCONT);
		BUG_ON(retval == 0);
	}

	return retval;
}

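/*
 * Continued status as seen by user space: the raw status is 0xffff,
 * which is exactly what WIFCONTINUED() tests for; with a siginfo-style
 * wait the same event is reported as CLD_CONTINUED/SIGCONT.
 */
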
/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	int ret = eligible_child(wo, p);
	if (!ret)
		return ret;

	ret = security_task_wait(p);
	if (unlikely(ret < 0)) {
		/*
		 * If we have not yet seen any eligible child,
		 * then let this error code replace -ECHILD.
		 * A permission error will give the user a clue
		 * to look for security policy problems, rather
		 * than for mysterious wait bugs.
		 */
		if (wo->notask_error)
			wo->notask_error = ret;
		return 0;
	}

	/* dead body doesn't have much to contribute */
	if (unlikely(p->exit_state == EXIT_DEAD)) {
		/*
		 * But do not ignore this task until the tracer does
		 * wait_task_zombie()->do_notify_parent().
		 */
		if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
			wo->notask_error = 0;
		return 0;
	}

	/* slay zombie? */
	if (p->exit_state == EXIT_ZOMBIE) {
		/*
		 * A zombie ptracee is only visible to its ptracer.
		 * Notification and reaping will be cascaded to the real
		 * parent when the ptracer detaches.
		 */
		if (likely(!ptrace) && unlikely(p->ptrace)) {
			/* it will become visible, clear notask_error */
			wo->notask_error = 0;
			return 0;
		}

		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p))
			return wait_task_zombie(wo, p);

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is
		 * set, so if there are live subthreads, there are events
		 * to wait for.  If all subthreads are dead, it's still
		 * safe to clear - this function will be called again in a
		 * finite amount of time once all the subthreads are
		 * released and will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * If @p is ptraced by a task in its real parent's group,
		 * hide group stop/continued state when looking at @p as
		 * the real parent; otherwise, a single stop can be
		 * reported twice as group and ptrace stops.
		 *
		 * If a ptracer wants to distinguish the two events for its
		 * own children, it should create a separate process which
		 * takes the role of real parent.
		 */
		if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
			return 0;

		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);
		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);
		if (ret)
			return ret;
	}

	return 0;
}

static int child_wait_callback(wait_queue_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, 1, p);
}

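/*
 * The exiting child passes itself as the wakeup key above, and
 * child_wait_callback() wakes only those waiters whose wait_opts match
 * that child, so an unrelated waitpid(some_other_pid) sleeper is left
 * undisturbed.
 */
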
static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options;
	wo.wo_info	= infop;
	wo.wo_stat	= NULL;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);

	if (ret > 0) {
		ret = 0;
	} else if (infop) {
		/*
		 * For a WNOHANG return, clear out all the fields
		 * we would set so the user can easily tell the
		 * difference.
		 */
		if (!ret)
			ret = put_user(0, &infop->si_signo);
		if (!ret)
			ret = put_user(0, &infop->si_errno);
		if (!ret)
			ret = put_user(0, &infop->si_code);
		if (!ret)
			ret = put_user(0, &infop->si_pid);
		if (!ret)
			ret = put_user(0, &infop->si_uid);
		if (!ret)
			ret = put_user(0, &infop->si_status);
	}

	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(5, ret, which, upid, infop, options, ru);
	return ret;
}

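/*
 * The zeroed siginfo above matters: after, say,
 *
 *	waitid(P_PID, pid, &info, WEXITED | WNOHANG);
 *
 * a return of 0 with info.si_pid == 0 means "nothing to reap yet",
 * while info.si_pid == pid means the child really changed state.
 */
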
1804 */ 1805 if (!ret) 1806 ret = put_user(0, &infop->si_signo); 1807 if (!ret) 1808 ret = put_user(0, &infop->si_errno); 1809 if (!ret) 1810 ret = put_user(0, &infop->si_code); 1811 if (!ret) 1812 ret = put_user(0, &infop->si_pid); 1813 if (!ret) 1814 ret = put_user(0, &infop->si_uid); 1815 if (!ret) 1816 ret = put_user(0, &infop->si_status); 1817 } 1818 1819 put_pid(pid); 1820 1821 /* avoid REGPARM breakage on x86: */ 1822 asmlinkage_protect(5, ret, which, upid, infop, options, ru); 1823 return ret; 1824 } 1825 1826 SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, 1827 int, options, struct rusage __user *, ru) 1828 { 1829 struct wait_opts wo; 1830 struct pid *pid = NULL; 1831 enum pid_type type; 1832 long ret; 1833 1834 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| 1835 __WNOTHREAD|__WCLONE|__WALL)) 1836 return -EINVAL; 1837 1838 if (upid == -1) 1839 type = PIDTYPE_MAX; 1840 else if (upid < 0) { 1841 type = PIDTYPE_PGID; 1842 pid = find_get_pid(-upid); 1843 } else if (upid == 0) { 1844 type = PIDTYPE_PGID; 1845 pid = get_task_pid(current, PIDTYPE_PGID); 1846 } else /* upid > 0 */ { 1847 type = PIDTYPE_PID; 1848 pid = find_get_pid(upid); 1849 } 1850 1851 wo.wo_type = type; 1852 wo.wo_pid = pid; 1853 wo.wo_flags = options | WEXITED; 1854 wo.wo_info = NULL; 1855 wo.wo_stat = stat_addr; 1856 wo.wo_rusage = ru; 1857 ret = do_wait(&wo); 1858 put_pid(pid); 1859 1860 /* avoid REGPARM breakage on x86: */ 1861 asmlinkage_protect(4, ret, upid, stat_addr, options, ru); 1862 return ret; 1863 } 1864 1865 #ifdef __ARCH_WANT_SYS_WAITPID 1866 1867 /* 1868 * sys_waitpid() remains for compatibility. waitpid() should be 1869 * implemented by calling sys_wait4() from libc.a. 1870 */ 1871 SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options) 1872 { 1873 return sys_wait4(pid, stat_addr, options, NULL); 1874 } 1875 1876 #endif 1877