// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mmu_context.h>

static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
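 *
 * Its sole caller is release_task(), which takes the lock with
 * write_lock_irq() before calling in; the lock is still held on return.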
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *tty;
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}

void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
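	 *
	 * do_notify_parent() returns true when the parent ignores the
	 * notification (e.g. SIGCHLD set to SIG_IGN), so nobody will wait
	 * for the leader and we must reap it via zap_leader below.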
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	seccomp_filter_release(p);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake_up() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current	  [S] cond = true
	 *        MB (A)	      MB (B)
	 *    [L] cond		  [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
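		 *
		 * ignored_task becomes NULL here: the reparented child is
		 * not exiting, so it must still count in the orphan check
		 * done by will_become_orphaned_pgrp().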
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	WRITE_ONCE(mm->owner, c);
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;
	struct core_state *core_state;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_lock around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	mmap_read_lock(mm);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;

		mmap_read_unlock(mm);

		self.task = current;
		if (self.task->flags & PF_SIGNALED)
			self.next = xchg(&core_state->dumper.next, &self);
		else
			self.task = NULL;
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
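		 *
		 * Threads that were not signalled into the dump keep
		 * self.task == NULL and therefore fall out of the wait
		 * loop below right after the nr_threads decrement.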
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			freezable_schedule();
		}
		__set_current_state(TASK_RUNNING);
		mmap_read_lock(mm);
	}
	mmgrab(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	/*
	 * When a thread stops operating on an address space, the loop
	 * in membarrier_private_expedited() may not observe that
	 * tsk->mm, and the loop in membarrier_global_expedited() may
	 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
	 * rq->membarrier_state, so those would not issue an IPI.
	 * Membarrier requires a memory barrier after accessing
	 * user-space memory, before clearing tsk->mm or the
	 * rq->membarrier_state.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	current->mm = NULL;
	membarrier_update_current_mm(NULL);
	enter_lazy_tlb(mm, current);
	local_irq_enable();
	task_unlock(current);
	mmap_read_unlock(mm);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
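		 *
		 * The walk below stops once it reaches global init or an
		 * ancestor whose pid sits at a shallower namespace level,
		 * i.e. outside father's pid namespace.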
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
				tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	/*
	 * We can get here from a kernel oops, sometimes with preemption off.
	 * Start by checking for critical errors.
	 * Then fix up important state like USER_DS and preemption.
	 * Then do everything else.
	 */

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	/*
	 * If do_exit is called because this process oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	force_uaccess_begin();

	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	profile_task_exit(tsk);
	kcov_task_exit(tsk);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */

	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a usable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! "
			      "exitcode=0x%08x\n",
			      tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	validate_creds_for_do_exit(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
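 *
 * The low byte of error_code becomes bits 8-15 of the wait status, which
 * is what WEXITSTATUS() extracts on the waiting side; see the decoding in
 * wait_task_zombie() below.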
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

struct waitid_info {
	pid_t pid;
	uid_t uid;
	int status;
	int cause;
};

struct wait_opts {
	enum pid_type		wo_type;
	int			wo_flags;
	struct pid		*wo_pid;

	struct waitid_info	*wo_info;
	int			wo_stat;
	struct rusage		*wo_rusage;

	wait_queue_entry_t	child_wait;
	int			notask_error;
};

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct waitid_info *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		status = p->exit_code;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();
		if (wo->wo_rusage)
			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
		put_task_struct(p);
		goto out_info;
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		u64 tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time. Until
		 * we change k_getrusage()-like users to rely on this lock
		 * we have to take ->siglock as well.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		spin_lock_irq(&current->sighand->siglock);
		write_seqlock(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock(&psig->stats_lock);
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	wo->wo_stat = status;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

out_info:
	infop = wo->wo_info;
	if (infop) {
		if ((status & 0x7f) == 0) {
			infop->cause = CLD_EXITED;
			infop->status = status >> 8;
		} else {
			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			infop->status = status & 0x7f;
		}
		infop->pid = pid;
		infop->uid = uid;
	}

	return pid;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = READ_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for.  If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in finite
		 * amount of time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
			   TASK_INTERRUPTIBLE, p);
}

static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
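	 *
	 * Each wakeup delivered through child_wait_callback() restarts the
	 * scan at the "repeat" label until something is reaped, WNOHANG
	 * applies, or a pending signal makes us return -ERESTARTSYS.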
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;
	unsigned int f_flags = 0;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;

		pid = find_get_pid(upid);
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid < 0)
			return -EINVAL;

		if (upid)
			pid = find_get_pid(upid);
		else
			pid = get_task_pid(current, PIDTYPE_PGID);
		break;
	case P_PIDFD:
		type = PIDTYPE_PID;
		if (upid < 0)
			return -EINVAL;

		pid = pidfd_get_pid(upid, &f_flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		break;
	default:
		return -EINVAL;
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options;
	wo.wo_info	= infop;
	wo.wo_rusage	= ru;
	if (f_flags & O_NONBLOCK)
		wo.wo_flags |= WNOHANG;

	ret = do_wait(&wo);
	if (!ret && !(options & WNOHANG) && (f_flags & O_NONBLOCK))
		ret = -EAGAIN;

	put_pid(pid);
	return ret;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ?
				 &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}

long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= 0;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}

int kernel_wait(pid_t pid, int *stat)
{
	struct wait_opts wo = {
		.wo_type	= PIDTYPE_PID,
		.wo_pid		= find_get_pid(pid),
		.wo_flags	= WEXITED,
	};
	int ret;

	ret = do_wait(&wo);
	if (ret > 0 && wo.wo_stat)
		*stat = wo.wo_stat;
	put_pid(wo.wo_pid);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ?
				&r : NULL);
	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;
	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (uru) {
			/* kernel_waitid() overwrites everything in ru */
			if (COMPAT_USE_64BIT_TIME)
				err = copy_to_user(uru, &ru, sizeof(ru));
			else
				err = put_compat_rusage(&ru, uru);
			if (err)
				return -EFAULT;
		}
	}

	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}
#endif

/**
 * thread_group_exited - check that a thread group has exited
 * @pid: tgid of thread group to be checked.
 *
 * Test if the thread group represented by tgid has exited (all
 * threads are zombies, dead or completely gone).
 *
 * Return: true if the thread group has exited. false otherwise.
 */
bool thread_group_exited(struct pid *pid)
{
	struct task_struct *task;
	bool exited;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);
	exited = !task ||
		(READ_ONCE(task->exit_state) && thread_group_empty(task));
	rcu_read_unlock();

	return exited;
}
EXPORT_SYMBOL(thread_group_exited);

__weak void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);
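
/*
 * For reference, the wait status encodings this file produces or decodes
 * (a summary of the code above, not a specification):
 *
 *   exited:     (exit_code & 0xff) << 8, see sys_exit_group() and the
 *               (status & 0x7f) == 0 branch of wait_task_zombie()
 *   killed:     status & 0x7f is the fatal signal; 0x80 set means a core
 *               dump (CLD_DUMPED vs CLD_KILLED in wait_task_zombie())
 *   stopped:    (exit_code << 8) | 0x7f, see wait_task_stopped()
 *   continued:  0xffff, see wait_task_continued()
 */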