// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
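
/*
 * Detach @p from the pid hashes and the task/thread lists. Per the
 * comment on __exit_signal() below, this runs with tasklist_lock
 * write-locked, and __exit_signal() additionally holds ->siglock
 * around the call.
 */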
static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *uninitialized_var(tty);
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}
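
/*
 * Final cleanup of a dead task: drop its user's process count, detach
 * it from the pid hashes and the tasklist, and drop the references
 * keeping the task_struct alive. If this was the last non-leader
 * thread and the zombie leader's parent does not want a notification,
 * the leader is reaped here as well - hence the repeat loop below.
 */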
void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	proc_flush_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

void rcuwait_wake_up(struct rcuwait *w)
{
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_trywake() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current	  [S] cond = true
	 *        MB (A)	      MB (B)
	 *    [L] cond		  [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		wake_up_process(task);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}
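
/*
 * Does @pgrp contain any task sitting in job-control stop
 * (SIGNAL_STOP_STOPPED)? Used by kill_orphaned_pgrp() below to decide
 * whether a newly orphaned pgrp should get the SIGHUP/SIGCONT pair.
 */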
static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	WRITE_ONCE(mm->owner, c);
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;
	struct core_state *core_state;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;

		up_read(&mm->mmap_sem);

		self.task = current;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			freezable_schedule();
		}
		__set_current_state(TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	mmgrab(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	current->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(current);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}
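
/*
 * If the exiting @father is a pid namespace's child_reaper, the reaper
 * duty is handed to another live thread of the group; failing that,
 * the whole namespace is torn down via zap_pid_ns_processes(). Note
 * that this can drop and reacquire tasklist_lock (see the sparse
 * annotations below), releasing whatever is already on @dead first.
 */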
static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			   do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
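
/*
 * The last function a task ever runs. Roughly, in order: flag the task
 * PF_EXITING, detach from the mm, release System V IPC, files, fs and
 * namespaces, flush perf and cgroup state, then tell the family via
 * exit_notify() and schedule away for good through do_task_dead().
 * This summary is descriptive only; the authoritative ordering is the
 * code below.
 */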
void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);
	kcov_task_exit(tsk);

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	/*
	 * If do_exit is called because this process oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	set_fs(USER_DS);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */

	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a useable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! exitcode=0x%08x\n",
				tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);
	exit_umh(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	validate_creds_for_do_exit(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);
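
/*
 * The (error_code & 0xff) << 8 below builds the classic wait(2) status
 * word: bits 8-15 carry the exit code, bits 0-6 the killing signal
 * (zero for a plain exit) and bit 7 (0x80) the core-dump flag - which
 * is why wait_task_zombie() and do_group_exit() test 0x7f and 0x80.
 * E.g. exit(1) is reported as 0x0100, so WEXITSTATUS(0x0100) == 1.
 */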
SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

struct waitid_info {
	pid_t pid;
	uid_t uid;
	int status;
	int cause;
};

struct wait_opts {
	enum pid_type		wo_type;
	int			wo_flags;
	struct pid		*wo_pid;

	struct waitid_info	*wo_info;
	int			wo_stat;
	struct rusage		*wo_rusage;

	wait_queue_entry_t	child_wait;
	int			notask_error;
};

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}
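
/*
 * The __WCLONE test in eligible_child() below, spelled out: the XOR
 * skips a child exactly when its "clone-ness" does not match what the
 * caller asked for.
 *
 *	child reports SIGCHLD	__WCLONE set	waited on?
 *	yes			no		yes
 *	yes			yes		no
 *	no			no		no
 *	no			yes		yes
 *
 * (__WALL or an active ptrace attachment short-circuits the test.)
 */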
static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct waitid_info *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		status = p->exit_code;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();
		if (wo->wo_rusage)
			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
		put_task_struct(p);
		goto out_info;
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		u64 tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time. Until
		 * we change k_getrusage()-like users to rely on this lock
		 * we have to take ->siglock as well.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		spin_lock_irq(&current->sighand->siglock);
		write_seqlock(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock(&psig->stats_lock);
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	wo->wo_stat = status;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

out_info:
	infop = wo->wo_info;
	if (infop) {
		if ((status & 0x7f) == 0) {
			infop->cause = CLD_EXITED;
			infop->status = status >> 8;
		} else {
			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			infop->status = status & 0x7f;
		}
		infop->pid = pid;
		infop->uid = uid;
	}

	return pid;
}
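
/*
 * Pick the stop code a waiter should see: the per-task exit_code for a
 * ptrace stop, the per-group group_exit_code for a job-control stop,
 * or NULL when @p is not in a reportable stopped state.
 */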
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = READ_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for.  If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in a finite
		 * amount of time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}
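
/*
 * Wakeup callback hooked into ->signal->wait_chldexit: a waiter parked
 * in do_wait() is only woken if the child event @key is one it could
 * actually be waiting for, so unrelated child exits do not cause
 * spurious wakeups.
 */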
static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
			   TASK_INTERRUPTIBLE, p);
}

static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	    (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}
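
/*
 * Resolve a pidfd to the struct pid it refers to and take a reference
 * on it; the caller owns that reference and must put_pid() it, as
 * kernel_waitid() does below.
 */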
static struct pid *pidfd_get_pid(unsigned int fd)
{
	struct fd f;
	struct pid *pid;

	f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);

	pid = pidfd_pid(f.file);
	if (!IS_ERR(pid))
		get_pid(pid);

	fdput(f);
	return pid;
}

static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;

		pid = find_get_pid(upid);
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid < 0)
			return -EINVAL;

		if (upid)
			pid = find_get_pid(upid);
		else
			pid = get_task_pid(current, PIDTYPE_PGID);
		break;
	case P_PIDFD:
		type = PIDTYPE_PID;
		if (upid < 0)
			return -EINVAL;

		pid = pidfd_get_pid(upid);
		if (IS_ERR(pid))
			return PTR_ERR(pid);
		break;
	default:
		return -EINVAL;
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options;
	wo.wo_info	= infop;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);

	put_pid(pid);
	return ret;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	if (!user_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_access_end();
	return err;
Efault:
	user_access_end();
	return -EFAULT;
}
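
/*
 * kernel_wait4() decodes the classic waitpid() pid argument:
 *
 *	upid < -1	wait for any child in process group -upid
 *	upid == -1	wait for any child
 *	upid == 0	wait for any child in the caller's process group
 *	upid > 0	wait for the child with that pid
 *
 * upid == INT_MIN is rejected up front because -INT_MIN overflows.
 */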
long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= 0;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (uru) {
			/* kernel_waitid() overwrites everything in ru */
			if (COMPAT_USE_64BIT_TIME)
				err = copy_to_user(uru, &ru, sizeof(ru));
			else
				err = put_compat_rusage(&ru, uru);
			if (err)
				return -EFAULT;
		}
	}

	if (!infop)
		return err;

	if (!user_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_access_end();
	return err;
Efault:
	user_access_end();
	return -EFAULT;
}
#endif

__weak void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);