Lines matching the free-text search query "+full:wo +full:- +full:data" in the Linux kernel's kernel/exit.c (each hit: source line number, matching line, enclosing function where known).
1 // SPDX-License-Identifier: GPL-2.0-only
44 #include <linux/posix-timers.h>
80 * overflowing 32-bit refcounts or the ldsem writer count.
88 .data = &oops_limit,
125 nr_threads--; in __unhash_process()
132 list_del_rcu(&p->tasks); in __unhash_process()
133 list_del_init(&p->sibling); in __unhash_process()
136 list_del_rcu(&p->thread_group); in __unhash_process()
137 list_del_rcu(&p->thread_node); in __unhash_process()
141 * This function expects the tasklist_lock write-locked.
145 struct signal_struct *sig = tsk->signal; in __exit_signal()
151 sighand = rcu_dereference_check(tsk->sighand, in __exit_signal()
153 spin_lock(&sighand->siglock); in __exit_signal()
162 tty = sig->tty; in __exit_signal()
163 sig->tty = NULL; in __exit_signal()
169 if (sig->notify_count > 0 && !--sig->notify_count) in __exit_signal()
170 wake_up_process(sig->group_exec_task); in __exit_signal()
172 if (tsk == sig->curr_target) in __exit_signal()
173 sig->curr_target = next_thread(tsk); in __exit_signal()
176 add_device_randomness((const void*) &tsk->se.sum_exec_runtime, in __exit_signal()
183 * see the empty ->thread_head list. in __exit_signal()
186 write_seqlock(&sig->stats_lock); in __exit_signal()
187 sig->utime += utime; in __exit_signal()
188 sig->stime += stime; in __exit_signal()
189 sig->gtime += task_gtime(tsk); in __exit_signal()
190 sig->min_flt += tsk->min_flt; in __exit_signal()
191 sig->maj_flt += tsk->maj_flt; in __exit_signal()
192 sig->nvcsw += tsk->nvcsw; in __exit_signal()
193 sig->nivcsw += tsk->nivcsw; in __exit_signal()
194 sig->inblock += task_io_get_inblock(tsk); in __exit_signal()
195 sig->oublock += task_io_get_oublock(tsk); in __exit_signal()
196 task_io_accounting_add(&sig->ioac, &tsk->ioac); in __exit_signal()
197 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; in __exit_signal()
198 sig->nr_threads--; in __exit_signal()
200 write_sequnlock(&sig->stats_lock); in __exit_signal()
203 * Do this under ->siglock, we can race with another thread in __exit_signal()
206 flush_sigqueue(&tsk->pending); in __exit_signal()
207 tsk->sighand = NULL; in __exit_signal()
208 spin_unlock(&sighand->siglock); in __exit_signal()
213 flush_sigqueue(&sig->shared_pending); in __exit_signal()
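The write_seqlock(&sig->stats_lock) / write_sequnlock() pair above lets lockless readers of the accumulated utime/stime statistics detect a concurrent writer and retry instead of taking a lock. Below is a rough single-writer seqlock in portable C11, purely as an illustration of the retry protocol; the kernel's seqlock_t carries more machinery, and the plain reads here are not formally data-race-free across threads.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;            /* odd while a write is in progress */
static unsigned long utime, stime; /* the protected statistics */

static void stats_write(unsigned long du, unsigned long ds)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* -> odd */
	utime += du;
	stime += ds;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* -> even */
}

static void stats_read(unsigned long *u, unsigned long *s)
{
	unsigned begin;

	do {
		/* spin until no writer is active, then snapshot */
		do {
			begin = atomic_load_explicit(&seq, memory_order_acquire);
		} while (begin & 1);
		*u = utime;
		*s = stime;
		/* retry if the sequence moved while we were reading */
	} while (atomic_load_explicit(&seq, memory_order_acquire) != begin);
}

int main(void)
{
	unsigned long u, s;

	stats_write(5, 3);
	stats_read(&u, &s);
	printf("utime=%lu stime=%lu\n", u, s);
	return 0;
}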
231 if (refcount_dec_and_test(&task->rcu_users)) in put_task_struct_rcu_user()
232 call_rcu(&task->rcu, delayed_put_task_struct); in put_task_struct_rcu_user()
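refcount_dec_and_test(&task->rcu_users) returns true only for the caller that drops the final reference, and only that caller schedules the deferred free through call_rcu(). A minimal user-space sketch of the same drop-last-reference shape using C11 atomics; the struct and function names are invented for the example, and an immediate free() stands in for the RCU-deferred delayed_put_task_struct().

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;   /* counts live users, like task->rcu_users */
	int payload;
};

static void obj_release(struct obj *o)
{
	/* In the kernel this would be call_rcu(..., delayed_put_task_struct);
	 * here we free immediately for the sake of a runnable example. */
	printf("last reference dropped, freeing payload %d\n", o->payload);
	free(o);
}

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the *previous* value, so 1 means we were last */
	if (atomic_fetch_sub_explicit(&o->refs, 1, memory_order_acq_rel) == 1)
		obj_release(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refs, 2);   /* two notional users */
	o->payload = 42;

	obj_put(o);   /* first put: object survives */
	obj_put(o);   /* second put: triggers release */
	return 0;
}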
245 /* don't need to get the RCU readlock here - the process is dead and in release_task()
246 * can't be modifying its own credentials. But shut RCU-lockdep up */ in release_task()
255 thread_pid = get_pid(p->thread_pid); in release_task()
259 * If we are the last non-leader member of the thread in release_task()
264 leader = p->group_leader; in release_task()
266 && leader->exit_state == EXIT_ZOMBIE) { in release_task()
272 zap_leader = do_notify_parent(leader, leader->exit_signal); in release_task()
274 leader->exit_state = EXIT_DEAD; in release_task()
309 task = rcu_dereference(w->task); in rcuwait_wake_up()
321 * by terminal-generated stop signals. Newly orphaned process groups are
333 (p->exit_state && thread_group_empty(p)) || in will_become_orphaned_pgrp()
334 is_global_init(p->real_parent)) in will_become_orphaned_pgrp()
337 if (task_pgrp(p->real_parent) != pgrp && in will_become_orphaned_pgrp()
338 task_session(p->real_parent) == task_session(p)) in will_become_orphaned_pgrp()
361 if (p->signal->flags & SIGNAL_STOP_STOPPED) in has_stopped_jobs()
383 parent = tsk->real_parent; in kill_orphaned_pgrp()
406 * and setting PF_POSTCOREDUMP. The core-inducing thread in coredump_task_exit()
407 * will increment ->nr_threads for each thread in the in coredump_task_exit()
410 spin_lock_irq(&tsk->sighand->siglock); in coredump_task_exit()
411 tsk->flags |= PF_POSTCOREDUMP; in coredump_task_exit()
412 core_state = tsk->signal->core_state; in coredump_task_exit()
413 spin_unlock_irq(&tsk->sighand->siglock); in coredump_task_exit()
417 ((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) { in coredump_task_exit()
421 if (self.task->flags & PF_SIGNALED) in coredump_task_exit()
422 self.next = xchg(&core_state->dumper.next, &self); in coredump_task_exit()
427 * to core_state->dumper. in coredump_task_exit()
429 if (atomic_dec_and_test(&core_state->nr_threads)) in coredump_task_exit()
430 complete(&core_state->startup); in coredump_task_exit()
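The atomic_dec_and_test(&core_state->nr_threads) / complete(&core_state->startup) pair is a countdown rendezvous: each exiting thread decrements the count, and the last one wakes the core-dumping thread. Below is a pthreads analogue, offered only as a sketch of the shape; kernel completions are not condition variables, but the last-one-signals pattern is the same.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  all_in = PTHREAD_COND_INITIALIZER;
static int nr_threads = 3;           /* like core_state->nr_threads */

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	/* last thread to arrive signals the waiter, like complete() */
	if (--nr_threads == 0)
		pthread_cond_signal(&all_in);
	pthread_mutex_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_t t[3];

	for (int i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	while (nr_threads != 0)           /* like wait_for_completion() */
		pthread_cond_wait(&all_in, &lock);
	pthread_mutex_unlock(&lock);
	printf("all threads checked in\n");

	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}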
455 if (mm->owner != p) in mm_update_next_owner()
462 if (atomic_read(&mm->mm_users) <= 1) { in mm_update_next_owner()
463 WRITE_ONCE(mm->owner, NULL); in mm_update_next_owner()
471 list_for_each_entry(c, &p->children, sibling) { in mm_update_next_owner()
472 if (c->mm == mm) in mm_update_next_owner()
479 list_for_each_entry(c, &p->real_parent->children, sibling) { in mm_update_next_owner()
480 if (c->mm == mm) in mm_update_next_owner()
488 if (atomic_read(&mm->mm_users) <= 1) in mm_update_next_owner()
490 if (g->flags & PF_KTHREAD) in mm_update_next_owner()
493 if (c->mm == mm) in mm_update_next_owner()
495 if (c->mm) in mm_update_next_owner()
505 WRITE_ONCE(mm->owner, NULL); in mm_update_next_owner()
512 * The task_lock protects c->mm from changing. in mm_update_next_owner()
513 * We always want mm->owner->mm == mm in mm_update_next_owner()
521 if (c->mm != mm) { in mm_update_next_owner()
526 WRITE_ONCE(mm->owner, c); in mm_update_next_owner()
539 struct mm_struct *mm = current->mm; in exit_mm()
547 BUG_ON(mm != current->active_mm); in exit_mm()
553 * tsk->mm, and the loop in membarrier_global_expedited() may in exit_mm()
555 * rq->membarrier_state, so those would not issue an IPI. in exit_mm()
557 * user-space memory, before clearing tsk->mm or the in exit_mm()
558 * rq->membarrier_state. in exit_mm()
562 current->mm = NULL; in exit_mm()
579 if (!(t->flags & PF_EXITING)) in find_alive_thread()
591 struct task_struct *reaper = pid_ns->child_reaper; in find_child_reaper()
599 pid_ns->child_reaper = reaper; in find_child_reaper()
606 list_del_init(&p->ptrace_entry); in find_child_reaper()
617 * When we die, we re-parent all our children, and try to:
632 if (father->signal->has_child_subreaper) { in find_new_reaper()
633 unsigned int ns_level = task_pid(father)->level; in find_new_reaper()
635 * Find the first ->is_child_subreaper ancestor in our pid_ns. in find_new_reaper()
639 * We check pid->level, this is slightly more efficient than in find_new_reaper()
642 for (reaper = father->real_parent; in find_new_reaper()
643 task_pid(reaper)->level == ns_level; in find_new_reaper()
644 reaper = reaper->real_parent) { in find_new_reaper()
647 if (!reaper->signal->is_child_subreaper) in find_new_reaper()
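find_new_reaper() walks up real_parent links looking for an ancestor in the same pid namespace that set is_child_subreaper. From user space that flag is toggled with prctl(PR_SET_CHILD_SUBREAPER), a Linux-specific call; here is a small runnable demo, with illustrative timings.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* Mark this process as a subreaper: orphaned descendants get
	 * reparented to us instead of to init (see find_new_reaper()). */
	if (prctl(PR_SET_CHILD_SUBREAPER, 1) != 0) {
		perror("prctl");
		return 1;
	}

	pid_t child = fork();
	if (child == 0) {
		pid_t grandchild = fork();
		if (grandchild == 0) {
			sleep(1);   /* outlive our parent */
			printf("grandchild: ppid is now %d\n", (int)getppid());
			_exit(0);
		}
		_exit(0);           /* orphan the grandchild immediately */
	}

	waitpid(child, NULL, 0);    /* reap the middle process */
	sleep(2);                   /* let the grandchild print, then exit */
	/* The orphaned grandchild was reparented to us, so we can reap it. */
	waitpid(-1, NULL, 0);
	return 0;
}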
664 if (unlikely(p->exit_state == EXIT_DEAD)) in reparent_leader()
668 p->exit_signal = SIGCHLD; in reparent_leader()
671 if (!p->ptrace && in reparent_leader()
672 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { in reparent_leader()
673 if (do_notify_parent(p, p->exit_signal)) { in reparent_leader()
674 p->exit_state = EXIT_DEAD; in reparent_leader()
675 list_add(&p->ptrace_entry, dead); in reparent_leader()
695 if (unlikely(!list_empty(&father->ptraced))) in forget_original_parent()
700 if (list_empty(&father->children)) in forget_original_parent()
704 list_for_each_entry(p, &father->children, sibling) { in forget_original_parent()
706 RCU_INIT_POINTER(t->real_parent, reaper); in forget_original_parent()
707 BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father)); in forget_original_parent()
708 if (likely(!t->ptrace)) in forget_original_parent()
709 t->parent = t->real_parent; in forget_original_parent()
710 if (t->pdeath_signal) in forget_original_parent()
711 group_send_sig_info(t->pdeath_signal, in forget_original_parent()
722 list_splice_tail_init(&father->children, &reaper->children); in forget_original_parent()
739 kill_orphaned_pgrp(tsk->group_leader, NULL); in exit_notify()
741 tsk->exit_state = EXIT_ZOMBIE; in exit_notify()
742 if (unlikely(tsk->ptrace)) { in exit_notify()
746 tsk->exit_signal : SIGCHLD; in exit_notify()
750 do_notify_parent(tsk, tsk->exit_signal); in exit_notify()
756 tsk->exit_state = EXIT_DEAD; in exit_notify()
757 list_add(&tsk->ptrace_entry, &dead); in exit_notify()
760 /* mt-exec, de_thread() is waiting for group leader */ in exit_notify()
761 if (unlikely(tsk->signal->notify_count < 0)) in exit_notify()
762 wake_up_process(tsk->signal->group_exec_task); in exit_notify()
766 list_del_init(&p->ptrace_entry); in exit_notify()
786 current->comm, task_pid_nr(current), free); in check_stack_usage()
797 struct sighand_struct *sighand = tsk->sighand; in synchronize_group_exit()
798 struct signal_struct *signal = tsk->signal; in synchronize_group_exit()
800 spin_lock_irq(&sighand->siglock); in synchronize_group_exit()
801 signal->quick_threads--; in synchronize_group_exit()
802 if ((signal->quick_threads == 0) && in synchronize_group_exit()
803 !(signal->flags & SIGNAL_GROUP_EXIT)) { in synchronize_group_exit()
804 signal->flags = SIGNAL_GROUP_EXIT; in synchronize_group_exit()
805 signal->group_exit_code = code; in synchronize_group_exit()
806 signal->group_stop_count = 0; in synchronize_group_exit()
808 spin_unlock_irq(&sighand->siglock); in synchronize_group_exit()
820 WARN_ON(tsk->plug); in do_exit()
833 if (tsk->mm) in do_exit()
834 sync_mm_rss(tsk->mm); in do_exit()
836 group_dead = atomic_dec_and_test(&tsk->signal->live); in do_exit()
844 tsk->signal->group_exit_code ?: (int)code); in do_exit()
847 hrtimer_cancel(&tsk->signal->real_timer); in do_exit()
850 if (tsk->mm) in do_exit()
851 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); in do_exit()
858 tsk->exit_code = code; in do_exit()
878 * Flush inherited counters to the parent - before the parent in do_exit()
879 * gets woken up by child-exit notifications. in do_exit()
898 if (unlikely(current->pi_state_cache)) in do_exit()
899 kfree(current->pi_state_cache); in do_exit()
906 if (tsk->io_context) in do_exit()
909 if (tsk->splice_pipe) in do_exit()
910 free_pipe_info(tsk->splice_pipe); in do_exit()
912 if (tsk->task_frag.page) in do_exit()
913 put_page(tsk->task_frag.page); in do_exit()
919 if (tsk->nr_dirtied) in do_exit()
920 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); in do_exit()
944 if (unlikely(!tsk->pid)) in make_task_dead()
949 current->comm, task_pid_nr(current)); in make_task_dead()
954 current->comm, task_pid_nr(current), in make_task_dead()
964 * This means that repeated oopsing can make unexploitable-looking bugs in make_task_dead()
977 if (unlikely(tsk->flags & PF_EXITING)) { in make_task_dead()
980 tsk->exit_state = EXIT_DEAD; in make_task_dead()
981 refcount_inc(&tsk->rcu_users); in make_task_dead()
1000 struct signal_struct *sig = current->signal; in do_group_exit()
1002 if (sig->flags & SIGNAL_GROUP_EXIT) in do_group_exit()
1003 exit_code = sig->group_exit_code; in do_group_exit()
1004 else if (sig->group_exec_task) in do_group_exit()
1007 struct sighand_struct *const sighand = current->sighand; in do_group_exit()
1009 spin_lock_irq(&sighand->siglock); in do_group_exit()
1010 if (sig->flags & SIGNAL_GROUP_EXIT) in do_group_exit()
1012 exit_code = sig->group_exit_code; in do_group_exit()
1013 else if (sig->group_exec_task) in do_group_exit()
1016 sig->group_exit_code = exit_code; in do_group_exit()
1017 sig->flags = SIGNAL_GROUP_EXIT; in do_group_exit()
1020 spin_unlock_irq(&sighand->siglock); in do_group_exit()
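do_group_exit() shows a classic check, lock, re-check sequence: the SIGNAL_GROUP_EXIT fast path at line 1002 avoids siglock, and the test is repeated at line 1010 under the lock in case another thread set the flag in between. A hedged user-space sketch of the same double-checked pattern, with invented names and an atomic flag standing in for sig->flags.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool group_exit;   /* like SIGNAL_GROUP_EXIT */
static int group_exit_code;

static int do_group_exit_sketch(int code)
{
	/* Lockless fast path: group_exit_code is published before the
	 * flag is set, so seeing the flag means the code is valid. */
	if (atomic_load(&group_exit))
		return group_exit_code;

	pthread_mutex_lock(&siglock);
	/* Re-check under the lock: we may have raced with another thread. */
	if (atomic_load(&group_exit)) {
		code = group_exit_code;
	} else {
		group_exit_code = code;
		atomic_store(&group_exit, true);
	}
	pthread_mutex_unlock(&siglock);
	return code;
}

int main(void)
{
	printf("exit code %d\n", do_group_exit_sketch(9));
	printf("exit code %d\n", do_group_exit_sketch(15));  /* keeps 9 */
	return 0;
}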
1029 * wait4()-ing process will get the correct exit code - even if this
1059 static int eligible_pid(struct wait_opts *wo, struct task_struct *p) in eligible_pid() argument
1061 return wo->wo_type == PIDTYPE_MAX || in eligible_pid()
1062 task_pid_type(p, wo->wo_type) == wo->wo_pid; in eligible_pid()
1066 eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p) in eligible_child() argument
1068 if (!eligible_pid(wo, p)) in eligible_child()
1075 if (ptrace || (wo->wo_flags & __WALL)) in eligible_child()
1080 * otherwise, wait for non-clone children *only*. in eligible_child()
1083 * using a signal other than SIGCHLD, or a non-leader thread which in eligible_child()
1086 if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) in eligible_child()
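The expression at line 1086 uses XOR as an "exactly one of" test: a child whose exit_signal is not SIGCHLD is a clone-style child, and it matches only when the waiter passed __WCLONE, and vice versa. A tiny truth-table demo of the same predicate; the constants are redefined locally just for the illustration.

#include <stdbool.h>
#include <stdio.h>

#define SIGCHLD  17           /* value on Linux; illustrative here */
#define WCLONE   0x80000000u  /* stands in for __WCLONE */

static bool wclone_mismatch(int exit_signal, unsigned wo_flags)
{
	/* True (skip the child) when exactly one side is "clone-like". */
	return (exit_signal != SIGCHLD) ^ !!(wo_flags & WCLONE);
}

int main(void)
{
	printf("SIGCHLD child, no WCLONE: skip=%d\n", wclone_mismatch(SIGCHLD, 0));
	printf("SIGCHLD child,    WCLONE: skip=%d\n", wclone_mismatch(SIGCHLD, WCLONE));
	printf("clone child,   no WCLONE: skip=%d\n", wclone_mismatch(0, 0));
	printf("clone child,      WCLONE: skip=%d\n", wclone_mismatch(0, WCLONE));
	return 0;
}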
1098 static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) in wait_task_zombie() argument
1105 if (!likely(wo->wo_flags & WEXITED)) in wait_task_zombie()
1108 if (unlikely(wo->wo_flags & WNOWAIT)) { in wait_task_zombie()
1109 status = (p->signal->flags & SIGNAL_GROUP_EXIT) in wait_task_zombie()
1110 ? p->signal->group_exit_code : p->exit_code; in wait_task_zombie()
1114 if (wo->wo_rusage) in wait_task_zombie()
1115 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_zombie()
1124 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE) in wait_task_zombie()
1133 * Check thread_group_leader() to exclude the traced sub-threads. in wait_task_zombie()
1136 struct signal_struct *sig = p->signal; in wait_task_zombie()
1137 struct signal_struct *psig = current->signal; in wait_task_zombie()
1149 * p->signal fields because the whole thread group is dead in wait_task_zombie()
1152 * psig->stats_lock also protects us from our sub-threads in wait_task_zombie()
1154 * we change k_getrusage()-like users to rely on this lock in wait_task_zombie()
1155 * we have to take ->siglock as well. in wait_task_zombie()
1162 spin_lock_irq(&current->sighand->siglock); in wait_task_zombie()
1163 write_seqlock(&psig->stats_lock); in wait_task_zombie()
1164 psig->cutime += tgutime + sig->cutime; in wait_task_zombie()
1165 psig->cstime += tgstime + sig->cstime; in wait_task_zombie()
1166 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime; in wait_task_zombie()
1167 psig->cmin_flt += in wait_task_zombie()
1168 p->min_flt + sig->min_flt + sig->cmin_flt; in wait_task_zombie()
1169 psig->cmaj_flt += in wait_task_zombie()
1170 p->maj_flt + sig->maj_flt + sig->cmaj_flt; in wait_task_zombie()
1171 psig->cnvcsw += in wait_task_zombie()
1172 p->nvcsw + sig->nvcsw + sig->cnvcsw; in wait_task_zombie()
1173 psig->cnivcsw += in wait_task_zombie()
1174 p->nivcsw + sig->nivcsw + sig->cnivcsw; in wait_task_zombie()
1175 psig->cinblock += in wait_task_zombie()
1177 sig->inblock + sig->cinblock; in wait_task_zombie()
1178 psig->coublock += in wait_task_zombie()
1180 sig->oublock + sig->coublock; in wait_task_zombie()
1181 maxrss = max(sig->maxrss, sig->cmaxrss); in wait_task_zombie()
1182 if (psig->cmaxrss < maxrss) in wait_task_zombie()
1183 psig->cmaxrss = maxrss; in wait_task_zombie()
1184 task_io_accounting_add(&psig->ioac, &p->ioac); in wait_task_zombie()
1185 task_io_accounting_add(&psig->ioac, &sig->ioac); in wait_task_zombie()
1186 write_sequnlock(&psig->stats_lock); in wait_task_zombie()
1187 spin_unlock_irq(&current->sighand->siglock); in wait_task_zombie()
1190 if (wo->wo_rusage) in wait_task_zombie()
1191 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_zombie()
1192 status = (p->signal->flags & SIGNAL_GROUP_EXIT) in wait_task_zombie()
1193 ? p->signal->group_exit_code : p->exit_code; in wait_task_zombie()
1194 wo->wo_stat = status; in wait_task_zombie()
1203 if (do_notify_parent(p, p->exit_signal)) in wait_task_zombie()
1205 p->exit_state = state; in wait_task_zombie()
1212 infop = wo->wo_info; in wait_task_zombie()
1215 infop->cause = CLD_EXITED; in wait_task_zombie()
1216 infop->status = status >> 8; in wait_task_zombie()
1218 infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; in wait_task_zombie()
1219 infop->status = status & 0x7f; in wait_task_zombie()
1221 infop->pid = pid; in wait_task_zombie()
1222 infop->uid = uid; in wait_task_zombie()
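Lines 1215-1219 are where the user-visible wait status is packed: a normal exit stores the code in bits 8-15, a fatal signal lands in bits 0-6, and bit 7 (0x80) flags a core dump. The standard W* macros in <sys/wait.h> undo exactly this packing, as this runnable example shows.

#define _DEFAULT_SOURCE   /* for WCOREDUMP on glibc */
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0)
		_exit(42);              /* child: plain exit */

	int status;
	waitpid(pid, &status, 0);

	/* These macros undo the packing done in wait_task_zombie():
	 * exit code in bits 8..15, fatal signal in bits 0..6,
	 * core-dump flag in bit 7. */
	if (WIFEXITED(status))
		printf("exited, code %d\n", WEXITSTATUS(status));  /* 42 */
	else if (WIFSIGNALED(status))
		printf("killed by signal %d%s\n", WTERMSIG(status),
		       WCOREDUMP(status) ? " (core dumped)" : "");
	return 0;
}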
1231 if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING)) in task_stopped_code()
1232 return &p->exit_code; in task_stopped_code()
1234 if (p->signal->flags & SIGNAL_STOP_STOPPED) in task_stopped_code()
1235 return &p->signal->group_exit_code; in task_stopped_code()
1241 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
1242 * @wo: wait options
1250 * non-zero. Also, grabs and releases @p->sighand->siglock.
1254 * should continue. Non-zero return, -errno on failure and @p's pid on
1258 static int wait_task_stopped(struct wait_opts *wo, in wait_task_stopped() argument
1269 if (!ptrace && !(wo->wo_flags & WUNTRACED)) in wait_task_stopped()
1276 spin_lock_irq(&p->sighand->siglock); in wait_task_stopped()
1286 if (!unlikely(wo->wo_flags & WNOWAIT)) in wait_task_stopped()
1291 spin_unlock_irq(&p->sighand->siglock); in wait_task_stopped()
1307 if (wo->wo_rusage) in wait_task_stopped()
1308 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_stopped()
1311 if (likely(!(wo->wo_flags & WNOWAIT))) in wait_task_stopped()
1312 wo->wo_stat = (exit_code << 8) | 0x7f; in wait_task_stopped()
1314 infop = wo->wo_info; in wait_task_stopped()
1316 infop->cause = why; in wait_task_stopped()
1317 infop->status = exit_code; in wait_task_stopped()
1318 infop->pid = pid; in wait_task_stopped()
1319 infop->uid = uid; in wait_task_stopped()
1325 * Handle do_wait work for one task in a live, non-stopped state.
1330 static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) in wait_task_continued() argument
1336 if (!unlikely(wo->wo_flags & WCONTINUED)) in wait_task_continued()
1339 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) in wait_task_continued()
1342 spin_lock_irq(&p->sighand->siglock); in wait_task_continued()
1343 /* Re-check with the lock held. */ in wait_task_continued()
1344 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { in wait_task_continued()
1345 spin_unlock_irq(&p->sighand->siglock); in wait_task_continued()
1348 if (!unlikely(wo->wo_flags & WNOWAIT)) in wait_task_continued()
1349 p->signal->flags &= ~SIGNAL_STOP_CONTINUED; in wait_task_continued()
1351 spin_unlock_irq(&p->sighand->siglock); in wait_task_continued()
1357 if (wo->wo_rusage) in wait_task_continued()
1358 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_continued()
1361 infop = wo->wo_info; in wait_task_continued()
1363 wo->wo_stat = 0xffff; in wait_task_continued()
1365 infop->cause = CLD_CONTINUED; in wait_task_continued()
1366 infop->pid = pid; in wait_task_continued()
1367 infop->uid = uid; in wait_task_continued()
1368 infop->status = SIGCONT; in wait_task_continued()
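wait_task_stopped() encodes a stop as (exit_code << 8) | 0x7f and wait_task_continued() reports 0xffff, which is what WIFSTOPPED()/WSTOPSIG() and WIFCONTINUED() test for in user space. A runnable demonstration that drives a child through stop and continue:

#define _DEFAULT_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		pause();                /* child just waits for signals */
		_exit(0);
	}

	int status;

	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);    /* status = (SIGSTOP << 8) | 0x7f */
	if (WIFSTOPPED(status))
		printf("stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);
	waitpid(pid, &status, WCONTINUED);   /* status = 0xffff */
	if (WIFCONTINUED(status))
		printf("continued\n");

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);            /* reap the zombie */
	return 0;
}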
1376 * -ECHILD should be in ->notask_error before the first call.
1379 * then ->notask_error is 0 if @p is an eligible child,
1380 * or still -ECHILD.
1382 static int wait_consider_task(struct wait_opts *wo, int ptrace, in wait_consider_task() argument
1387 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition in wait_consider_task()
1390 int exit_state = READ_ONCE(p->exit_state); in wait_consider_task()
1396 ret = eligible_child(wo, ptrace, p); in wait_consider_task()
1406 wo->notask_error = 0; in wait_consider_task()
1410 if (likely(!ptrace) && unlikely(p->ptrace)) { in wait_consider_task()
1435 if (unlikely(ptrace) || likely(!p->ptrace)) in wait_consider_task()
1436 return wait_task_zombie(wo, p); in wait_consider_task()
1449 * to clear - this function will be called again in finite in wait_consider_task()
1455 * Stopped state is per-task and thus can't change once the in wait_consider_task()
1459 if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) in wait_consider_task()
1460 wo->notask_error = 0; in wait_consider_task()
1466 wo->notask_error = 0; in wait_consider_task()
1473 ret = wait_task_stopped(wo, ptrace, p); in wait_consider_task()
1482 return wait_task_continued(wo, p); in wait_consider_task()
1488 * -ECHILD should be in ->notask_error before the first call.
1491 * ->notask_error is 0 if there were any eligible children,
1492 * or still -ECHILD.
1494 static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) in do_wait_thread() argument
1498 list_for_each_entry(p, &tsk->children, sibling) { in do_wait_thread()
1499 int ret = wait_consider_task(wo, 0, p); in do_wait_thread()
1508 static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) in ptrace_do_wait() argument
1512 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { in ptrace_do_wait()
1513 int ret = wait_consider_task(wo, 1, p); in ptrace_do_wait()
1525 struct wait_opts *wo = container_of(wait, struct wait_opts, in child_wait_callback() local
1529 if (!eligible_pid(wo, p)) in child_wait_callback()
1532 if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent) in child_wait_callback()
1540 __wake_up_sync_key(&parent->signal->wait_chldexit, in __wake_up_parent()
1544 static bool is_effectively_child(struct wait_opts *wo, bool ptrace, in is_effectively_child() argument
1548 !ptrace ? target->real_parent : target->parent; in is_effectively_child()
1550 return current == parent || (!(wo->wo_flags & __WNOTHREAD) && in is_effectively_child()
1558 static int do_wait_pid(struct wait_opts *wo) in do_wait_pid() argument
1565 target = pid_task(wo->wo_pid, PIDTYPE_TGID); in do_wait_pid()
1566 if (target && is_effectively_child(wo, ptrace, target)) { in do_wait_pid()
1567 retval = wait_consider_task(wo, ptrace, target); in do_wait_pid()
1573 target = pid_task(wo->wo_pid, PIDTYPE_PID); in do_wait_pid()
1574 if (target && target->ptrace && in do_wait_pid()
1575 is_effectively_child(wo, ptrace, target)) { in do_wait_pid()
1576 retval = wait_consider_task(wo, ptrace, target); in do_wait_pid()
1584 static long do_wait(struct wait_opts *wo) in do_wait() argument
1588 trace_sched_process_wait(wo->wo_pid); in do_wait()
1590 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback); in do_wait()
1591 wo->child_wait.private = current; in do_wait()
1592 add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait); in do_wait()
1596 * We will clear ->notask_error to zero if we see any child that in do_wait()
1600 wo->notask_error = -ECHILD; in do_wait()
1601 if ((wo->wo_type < PIDTYPE_MAX) && in do_wait()
1602 (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type))) in do_wait()
1608 if (wo->wo_type == PIDTYPE_PID) { in do_wait()
1609 retval = do_wait_pid(wo); in do_wait()
1616 retval = do_wait_thread(wo, tsk); in do_wait()
1620 retval = ptrace_do_wait(wo, tsk); in do_wait()
1624 if (wo->wo_flags & __WNOTHREAD) in do_wait()
1631 retval = wo->notask_error; in do_wait()
1632 if (!retval && !(wo->wo_flags & WNOHANG)) { in do_wait()
1633 retval = -ERESTARTSYS; in do_wait()
1641 remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait); in do_wait()
1648 struct wait_opts wo; in kernel_waitid() local
1656 return -EINVAL; in kernel_waitid()
1658 return -EINVAL; in kernel_waitid()
1667 return -EINVAL; in kernel_waitid()
1674 return -EINVAL; in kernel_waitid()
1684 return -EINVAL; in kernel_waitid()
1692 return -EINVAL; in kernel_waitid()
1695 wo.wo_type = type; in kernel_waitid()
1696 wo.wo_pid = pid; in kernel_waitid()
1697 wo.wo_flags = options; in kernel_waitid()
1698 wo.wo_info = infop; in kernel_waitid()
1699 wo.wo_rusage = ru; in kernel_waitid()
1701 wo.wo_flags |= WNOHANG; in kernel_waitid()
1703 ret = do_wait(&wo); in kernel_waitid()
1705 ret = -EAGAIN; in kernel_waitid()
1723 return -EFAULT; in SYSCALL_DEFINE5()
1729 return -EFAULT; in SYSCALL_DEFINE5()
1731 unsafe_put_user(signo, &infop->si_signo, Efault); in SYSCALL_DEFINE5()
1732 unsafe_put_user(0, &infop->si_errno, Efault); in SYSCALL_DEFINE5()
1733 unsafe_put_user(info.cause, &infop->si_code, Efault); in SYSCALL_DEFINE5()
1734 unsafe_put_user(info.pid, &infop->si_pid, Efault); in SYSCALL_DEFINE5()
1735 unsafe_put_user(info.uid, &infop->si_uid, Efault); in SYSCALL_DEFINE5()
1736 unsafe_put_user(info.status, &infop->si_status, Efault); in SYSCALL_DEFINE5()
1741 return -EFAULT; in SYSCALL_DEFINE5()
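The unsafe_put_user() calls above fill the siginfo_t that the waitid() system call hands back: si_signo is SIGCHLD, si_code is one of the CLD_* causes, and si_pid/si_uid/si_status describe the child. A minimal user-space consumer of those fields:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0)
		_exit(7);

	siginfo_t info = { 0 };

	/* waitid() fills exactly the fields stored above:
	 * si_signo=SIGCHLD, si_code=CLD_*, si_pid, si_uid, si_status. */
	if (waitid(P_PID, pid, &info, WEXITED) == 0)
		printf("si_code=%s si_pid=%d si_status=%d\n",
		       info.si_code == CLD_EXITED ? "CLD_EXITED" : "other",
		       (int)info.si_pid, info.si_status);
	return 0;
}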
1747 struct wait_opts wo; in kernel_wait4() local
1754 return -EINVAL; in kernel_wait4()
1756 /* -INT_MIN is not defined */ in kernel_wait4()
1758 return -ESRCH; in kernel_wait4()
1760 if (upid == -1) in kernel_wait4()
1764 pid = find_get_pid(-upid); in kernel_wait4()
1773 wo.wo_type = type; in kernel_wait4()
1774 wo.wo_pid = pid; in kernel_wait4()
1775 wo.wo_flags = options | WEXITED; in kernel_wait4()
1776 wo.wo_info = NULL; in kernel_wait4()
1777 wo.wo_stat = 0; in kernel_wait4()
1778 wo.wo_rusage = ru; in kernel_wait4()
1779 ret = do_wait(&wo); in kernel_wait4()
1781 if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr)) in kernel_wait4()
1782 ret = -EFAULT; in kernel_wait4()
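The cutime/cstime/cmaxrss totals that wait_task_zombie() folds into the parent's signal_struct are the numbers wait4()'s rusage argument and getrusage(RUSAGE_CHILDREN) report, and they only appear after the child is reaped. A runnable check; the busy loop is just to give the child measurable CPU time.

#include <stdio.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		/* burn a little CPU so the child has nonzero utime */
		volatile unsigned long x = 0;
		for (unsigned long i = 0; i < 100000000UL; i++)
			x += i;
		_exit(0);
	}

	waitpid(pid, NULL, 0);   /* the stats are folded in at reap time */

	struct rusage ru;
	getrusage(RUSAGE_CHILDREN, &ru);
	printf("children utime: %ld.%06lds, maxrss: %ld kB\n",
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	       ru.ru_maxrss);
	return 0;
}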
1789 struct wait_opts wo = { in kernel_wait() local
1796 ret = do_wait(&wo); in kernel_wait()
1797 if (ret > 0 && wo.wo_stat) in kernel_wait()
1798 *stat = wo.wo_stat; in kernel_wait()
1799 put_pid(wo.wo_pid); in kernel_wait()
1811 return -EFAULT; in SYSCALL_DEFINE4()
1840 return -EFAULT; in COMPAT_SYSCALL_DEFINE4()
1864 return -EFAULT; in COMPAT_SYSCALL_DEFINE5()
1872 return -EFAULT; in COMPAT_SYSCALL_DEFINE5()
1874 unsafe_put_user(signo, &infop->si_signo, Efault); in COMPAT_SYSCALL_DEFINE5()
1875 unsafe_put_user(0, &infop->si_errno, Efault); in COMPAT_SYSCALL_DEFINE5()
1876 unsafe_put_user(info.cause, &infop->si_code, Efault); in COMPAT_SYSCALL_DEFINE5()
1877 unsafe_put_user(info.pid, &infop->si_pid, Efault); in COMPAT_SYSCALL_DEFINE5()
1878 unsafe_put_user(info.uid, &infop->si_uid, Efault); in COMPAT_SYSCALL_DEFINE5()
1879 unsafe_put_user(info.status, &infop->si_status, Efault); in COMPAT_SYSCALL_DEFINE5()
1884 return -EFAULT; in COMPAT_SYSCALL_DEFINE5()
1889 * thread_group_exited - check that a thread group has exited
1905 (READ_ONCE(task->exit_state) && thread_group_empty(task)); in thread_group_exited()
1915 * -falign-functions=N.