// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h>	/* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mmu_context.h>

static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *uninitialized_var(tty);
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}
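
/*
 * Illustrative sketch (not a caller in this file): deferring the final
 * put above past an RCU grace period is what lets lockless code look at
 * a task under rcu_read_lock() without taking a full reference, e.g.:
 *
 *	rcu_read_lock();
 *	task = rcu_dereference(w->task);
 *	if (task)
 *		wake_up_process(task);	// task_struct can't be freed yet
 *	rcu_read_unlock();
 *
 * This is exactly the pattern rcuwait_wake_up() below relies on.
 */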

void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current   [S] cond = true
	 *        MB (A)              MB (B)
	 *    [L] cond            [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);
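
/*
 * For reference, the wait side pairing with barrier (B) above lives in
 * <linux/rcuwait.h>; rcuwait_wait_event() expands to roughly the
 * following (simplified sketch, signal handling omitted):
 *
 *	rcu_assign_pointer(w->task, current);
 *	for (;;) {
 *		set_current_state(state);	// MB (A)
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	rcu_assign_pointer(w->task, NULL);
 *	__set_current_state(TASK_RUNNING);
 */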

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	WRITE_ONCE(mm->owner, c);
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */
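
/*
 * Why memcg tracks an owner at all (illustrative sketch, assuming the
 * usual lookup pattern in mm/memcontrol.c): charges against an mm are
 * routed through its owner task, so ->owner must either stay valid or
 * be NULL, in which case the lookup falls back to the root group:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 *	if (!memcg)
 *		memcg = root_mem_cgroup;
 *	rcu_read_unlock();
 */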

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;
	struct core_state *core_state;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_lock around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	mmap_read_lock(mm);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;

		mmap_read_unlock(mm);

		self.task = current;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			freezable_schedule();
		}
		__set_current_state(TASK_RUNNING);
		mmap_read_lock(mm);
	}
	mmgrab(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	current->mm = NULL;
	mmap_read_unlock(mm);
	enter_lazy_tlb(mm, current);
	task_unlock(current);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}
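
/*
 * Case 2 above is opted into from userspace; a service manager that
 * wants to collect orphaned descendants typically does (illustrative):
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1);
 *
 * after which orphans from its subtree are reparented to it (or to one
 * of its live threads, per find_alive_thread()) rather than to init,
 * so it can reap them with the wait*() family.
 */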

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}
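
/*
 * The pdeath_signal delivery above is the exit-side half of
 * prctl(PR_SET_PDEATHSIG).  A child that must not outlive its parent
 * might do (illustrative; the getppid() check closes the race with a
 * parent that died before the prctl took effect):
 *
 *	prctl(PR_SET_PDEATHSIG, SIGKILL);
 *	if (getppid() != expected_parent_pid)
 *		_exit(1);
 */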

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	/*
	 * We can get here from a kernel oops, sometimes with preemption off.
	 * Start by checking for critical errors.
	 * Then fix up important state like USER_DS and preemption.
	 * Then do everything else.
	 */

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	/*
	 * If do_exit is called because this process oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	set_fs(USER_DS);

	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	profile_task_exit(tsk);
	kcov_task_exit(tsk);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */

	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a usable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! exitcode=0x%08x\n",
				tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	validate_creds_for_do_exit(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}
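
/*
 * Note the wait status layout assumed by the (error_code & 0xff) << 8
 * shift in sys_exit() above and sys_exit_group() below: bits 8-15 carry
 * the exit code, the low 7 bits a terminating signal, bit 7 the
 * core-dump flag.  That is what userspace decodes (illustrative):
 *
 *	int status;
 *	waitpid(pid, &status, 0);
 *	if (WIFEXITED(status))			// (status & 0x7f) == 0
 *		code = WEXITSTATUS(status);	// (status >> 8) & 0xff
 *	else if (WIFSIGNALED(status))
 *		sig = WTERMSIG(status);		// status & 0x7f
 */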

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

struct waitid_info {
	pid_t pid;
	uid_t uid;
	int status;
	int cause;
};

struct wait_opts {
	enum pid_type		wo_type;
	int			wo_flags;
	struct pid		*wo_pid;

	struct waitid_info	*wo_info;
	int			wo_stat;
	struct rusage		*wo_rusage;

	wait_queue_entry_t	child_wait;
	int			notask_error;
};

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}
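
/*
 * Concretely (illustrative): a child created via clone(2) with an exit
 * signal other than SIGCHLD, e.g.
 *
 *	clone(fn, stack, CLONE_VM | SIGUSR1, arg);
 *
 * is a "clone" child in the sense above.  It is invisible to a plain
 * waitpid(-1, &status, 0) in the parent and is only reaped when
 * __WCLONE or __WALL is passed.
 */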

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct waitid_info *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		status = p->exit_code;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();
		if (wo->wo_rusage)
			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
		put_task_struct(p);
		goto out_info;
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		u64 tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time. Until
		 * we change k_getrusage()-like users to rely on this lock
		 * we have to take ->siglock as well.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		spin_lock_irq(&current->sighand->siglock);
		write_seqlock(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock(&psig->stats_lock);
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	wo->wo_stat = status;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

out_info:
	infop = wo->wo_info;
	if (infop) {
		if ((status & 0x7f) == 0) {
			infop->cause = CLD_EXITED;
			infop->status = status >> 8;
		} else {
			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			infop->status = status & 0x7f;
		}
		infop->pid = pid;
		infop->uid = uid;
	}

	return pid;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}
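
/*
 * Both helpers above back the job-control side of waitpid()
 * (illustrative):
 *
 *	waitpid(pid, &status, WUNTRACED | WCONTINUED);
 *	if (WIFSTOPPED(status))		// wait_task_stopped():
 *		sig = WSTOPSIG(status);	//   (exit_code << 8) | 0x7f
 *	else if (WIFCONTINUED(status))	// wait_task_continued(): 0xffff
 *		handle_resume();	// hypothetical handler
 */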

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = READ_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is set,
		 * so if there are live subthreads, there are events to
		 * wait for.  If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in a finite
		 * amount of time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
			   TASK_INTERRUPTIBLE, p);
}
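
/*
 * A sketch of the notification path that ends in the callback above
 * (assuming the usual chain through kernel/signal.c):
 *
 *	do_exit()
 *	  exit_notify()
 *	    do_notify_parent()		// queues SIGCHLD
 *	      __wake_up_parent()	// filtered by child_wait_callback()
 *
 * so a sleeping do_wait() below is only woken for children it could
 * actually be waiting for.
 */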

static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

static struct pid *pidfd_get_pid(unsigned int fd)
{
	struct fd f;
	struct pid *pid;

	f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);

	pid = pidfd_pid(f.file);
	if (!IS_ERR(pid))
		get_pid(pid);

	fdput(f);
	return pid;
}

static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;

		pid = find_get_pid(upid);
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid < 0)
			return -EINVAL;

		if (upid)
			pid = find_get_pid(upid);
		else
			pid = get_task_pid(current, PIDTYPE_PGID);
		break;
	case P_PIDFD:
		type = PIDTYPE_PID;
		if (upid < 0)
			return -EINVAL;

		pid = pidfd_get_pid(upid);
		if (IS_ERR(pid))
			return PTR_ERR(pid);
		break;
	default:
		return -EINVAL;
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options;
	wo.wo_info	= infop;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);

	put_pid(pid);
	return ret;
}
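
/*
 * The P_PIDFD case is what makes fd-based reaping work; from userspace
 * this looks roughly like (illustrative, error handling omitted):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	siginfo_t si;
 *	waitid(P_PIDFD, pidfd, &si, WEXITED);
 *	// si.si_code <- info.cause, si.si_status <- info.status
 */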

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}

long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= 0;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}
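
/*
 * The upid decoding above implements the classic waitpid(2) contract
 * (illustrative):
 *
 *	waitpid(-1, &s, 0);	// any child		(PIDTYPE_MAX)
 *	waitpid(0, &s, 0);	// caller's own pgrp	(PIDTYPE_PGID)
 *	waitpid(-42, &s, 0);	// process group 42	(PIDTYPE_PGID)
 *	waitpid(42, &s, 0);	// exactly pid 42	(PIDTYPE_PID)
 */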

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (uru) {
			/* kernel_waitid() overwrites everything in ru */
			if (COMPAT_USE_64BIT_TIME)
				err = copy_to_user(uru, &ru, sizeof(ru));
			else
				err = put_compat_rusage(&ru, uru);
			if (err)
				return -EFAULT;
		}
	}

	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}
#endif

/**
 * thread_group_exited - check that a thread group has exited
 * @pid: tgid of thread group to be checked.
 *
 * Test if the thread group represented by tgid has exited (all
 * threads are zombies, dead or completely gone).
 *
 * Return: true if the thread group has exited. false otherwise.
 */
bool thread_group_exited(struct pid *pid)
{
	struct task_struct *task;
	bool exited;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);
	exited = !task ||
		(READ_ONCE(task->exit_state) && thread_group_empty(task));
	rcu_read_unlock();

	return exited;
}
EXPORT_SYMBOL(thread_group_exited);

__weak void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);