/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/signalfd.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);

static void exit_mm(struct task_struct * tsk);

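/*
 * Drop the task from the pid hashes and the task/thread lists.
 * Called from __exit_signal() with tasklist_lock write-locked.
 */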
static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	/*
	 * Notify that this sighand has been detached. This must
	 * be called with the tsk->sighand lock held. Also, this
	 * accesses tsk->sighand internally, so it must be called
	 * before tsk->sighand is reset.
	 */
	signalfd_detach_locked(tsk);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		__cleanup_signal(sig);
	}
}

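/*
 * RCU callback: drop the final task_struct reference only after a
 * grace period, so lock-free walkers of the task lists cannot still
 * hold a pointer to the task when it is freed.
 */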
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

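/*
 * Detach the task from signal handling, the pid hashes and /proc, and
 * schedule the final put_task_struct() via RCU.  If this was the last
 * non-leader thread and the zombie leader is now self-reaping, loop
 * around and release the leader as well.
 */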
void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process (if it wants notification).
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	write_unlock_irq(&tasklist_lock);
	proc_flush_task(p);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| is_init(p->real_parent))
			continue;
		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p)) {
			ret = 0;
			break;
		}
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

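/*
 * Check whether any task in the process group @pgrp is in
 * TASK_STOPPED state, i.e. whether the group has stopped jobs.
 */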
static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * Various pieces of task state, such as scheduling policy and priority,
 * may have been inherited from a user process, so we reset them to sane
 * values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to kthreadd */
	remove_parent(current);
	current->real_parent = current->parent = kthreadd_task;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal our new parent on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (process_session(curr) != session) {
		detach_pid(curr, PIDTYPE_SID);
		set_signal_session(curr->signal, session);
		attach_pid(curr, PIDTYPE_SID, find_pid(session));
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp));
	}
}

static void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they allow a certain signal
 * (since daemonize() will have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped. */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages.  We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= PF_NOFREEZE;

	set_special_pids(1, 1);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_task_namespaces(current);
	current->nsproxy = init_task.nsproxy;
	get_task_namespaces(current);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);

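/*
 * Close every file descriptor still open in @files.  Walks the open
 * fd bitmap one word at a time, rescheduling between files so a huge
 * fd table does not hog the CPU.
 */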
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

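/*
 * Take a counted reference on the task's files_struct, or return NULL
 * if the task has already detached from it.
 */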
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * the files structure can be freed immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);

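/*
 * Install @files as the task's files_struct and drop the reference
 * on the old one.
 */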
void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}
EXPORT_SYMBOL(reset_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mmput(mm);
}

static inline void
choose_new_parent(struct task_struct *p, struct task_struct *reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state);
	p->real_parent = reaper;
}

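/*
 * Finish moving the child @p from the dying @father to its already
 * chosen new parent: fix up the ptrace and sibling links, deliver any
 * requested pdeath_signal, and handle the notification and
 * process-group orphan side effects of the move.  Called with
 * tasklist_lock write-locked.
 */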
static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/* If this is a threaded reparent there is no need to
	 * notify anyone that anything has happened.
	 */
	if (p->real_parent->group_leader == father->group_leader)
		return;

	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	if (!traced && p->exit_state == EXIT_ZOMBIE &&
	    p->exit_signal != -1 && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((task_pgrp(p) != task_pgrp(father)) &&
	    (task_session(p) == task_session(father))) {
		struct pid *pgrp = task_pgrp(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) &&
		    has_stopped_jobs(pgrp)) {
			__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static void
forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper(father);
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p, struct task_struct, sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent with a reaper; the real father is us */
			choose_new_parent(p, reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink (p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * If the ptraced child is a zombie with exit_signal == -1
		 * we must collect it before we exit, or it will remain a
		 * zombie forever, since we prevented it from self-reaping
		 * while it was being traced by us, so that we could see it
		 * in wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p, struct task_struct, ptrace_list);
		choose_new_parent(p, reaper);
		reparent_thread(p, father, 1);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;
	struct pid *pgrp;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING))
				recalc_sigpending_and_wake(t);
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	pgrp = task_pgrp(tsk);
	if ((task_pgrp(t) != pgrp) &&
	    (task_session(t) == task_session(tsk)) &&
	    will_become_orphaned_pgrp(pgrp, tsk) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    (tsk->parent_exec_id != t->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
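/*
 * Track the worst-case kernel stack usage seen so far.  The unused
 * part of the stack is still zero-filled, so scanning up from the end
 * of the stack for the first non-zero word tells us how much was left.
 */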
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long *n = end_of_stack(current);
	unsigned long free;

	while (*n == 0)
		n++;
	free = (unsigned long)n - (unsigned long)end_of_stack(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk == child_reaper(tsk))) {
		if (tsk->nsproxy->pid_ns != &init_pid_ns)
			tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
		else
			panic("Attempted to kill init!");
	}

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	spin_lock_irq(&tsk->pi_lock);
	tsk->flags |= PF_EXITING;
	spin_unlock_irq(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	proc_exit_connector(tsk);
	exit_task_namespaces(tsk);
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code & 0xff) << 8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

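/*
 * Decide whether @p can satisfy this wait: returns 0 if it does not
 * match @pid/@options, a negative error if security_task_wait()
 * refuses, 2 if it is a group leader whose thread group has not yet
 * emptied, and 1 otherwise.
 */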
static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
	int err;

	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (delay_group_leader(p))
		return 2;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}

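/*
 * Copy out siginfo and rusage for a WNOWAIT-style wait that leaves
 * the child unreaped, dropping the task reference taken by the
 * caller.  Returns @pid on success or the error from copying out.
 */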
static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;
	int status;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to EXIT_DEAD;
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;
	}

	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (retval) {
		// TODO: is this safe?
		p->exit_state = EXIT_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held.  */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			// TODO: is this safe?
			p->exit_state = EXIT_ZOMBIE;
			/*
			 * If this is not a detached task, notify the parent.
			 * If it's still not detached after that, don't release
			 * it now.
			 */
			if (p->exit_signal != -1) {
				do_notify_parent(p, p->exit_signal);
				if (p->exit_signal != -1)
					p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
			     int noreap, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state & TASK_TRACED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock. We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning. Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (unlikely(!p->signal))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}


static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach.  It is waiting for the tasklist_lock,
	 * under which we have to switch the parent links, but has already
	 * set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}

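/*
 * The common core of sys_wait4() and sys_waitid().  Scans the children
 * of every thread in our group (just our own with __WNOTHREAD) for one
 * matching @pid and @options, sleeping until an eligible child changes
 * state unless WNOHANG is set.
 */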
static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;
	int allowed, denied;

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	allowed = denied = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p, &tsk->children) {
			p = list_entry(_p, struct task_struct, sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			if (unlikely(ret < 0)) {
				denied = ret;
				continue;
			}
			allowed = 1;

			switch (p->state) {
			case TASK_TRACED:
				/*
				 * When we hit the race with PTRACE_ATTACH,
				 * we will not report this child.  But the
				 * race means it has not yet been moved to
				 * our ptrace_children list, so we need to
				 * set the flag here to avoid a spurious ECHILD
				 * when the race happens with the only child.
				 */
				flag = 1;
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock.  */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
						ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
	if (unlikely(denied) && !allowed)
		retval = denied;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif