/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);

static void exit_mm(struct task_struct * tsk);

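/*
 * Drop the task from the pid hashes and the global task/thread lists.
 * Called from __exit_signal() with tasklist_lock write-locked.
 */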
static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		__cleanup_signal(sig);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

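/*
 * Final cleanup of a dead task: unhash it, release the arch thread
 * state, and defer the task_struct free past an RCU grace period so
 * lockless walkers can still safely look at it.
 */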
void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process (if it wants notification).
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	write_unlock_irq(&tasklist_lock);
	proc_flush_task(p);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
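/*
 * Concretely: the group stays non-orphaned as long as some live member
 * has a parent in a different process group but in the same session;
 * that parent is the group's link to the session's job control.
 */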
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| is_init(p->real_parent))
			continue;
		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p)) {
			ret = 0;
			break;
		}
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * Various pieces of task state, such as scheduling policy and priority,
 * may have been inherited from a user process, so we reset them to sane
 * values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to kthreadd */
	remove_parent(current);
	current->real_parent = current->parent = kthreadd_task;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal kthreadd on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (process_session(curr) != session) {
		detach_pid(curr, PIDTYPE_SID);
		set_signal_session(curr->signal, session);
		attach_pid(curr, PIDTYPE_SID, find_pid(session));
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp));
	}
}

static void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
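/*
 * Typical use, sketched (this is not a caller from this file): a kernel
 * thread daemonize()s and then re-enables the one signal it wants to
 * react to:
 *
 *	static int my_kthread(void *unused)
 *	{
 *		daemonize("my_kthread");
 *		allow_signal(SIGKILL);
 *		while (!signal_pending(current))
 *			do_work();
 *		return 0;
 *	}
 *
 * (my_kthread/do_work are hypothetical names for illustration.)
 */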
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages.  We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= PF_NOFREEZE;

	set_special_pids(1, 1);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_task_namespaces(current);
	current->nsproxy = init_task.nsproxy;
	get_task_namespaces(current);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);

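/*
 * Walk the open-fd bitmap one long at a time and filp_close() every
 * file still installed in the table; see the "last reference" note
 * inside for why no locking is needed here.
 */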
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);

void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}
EXPORT_SYMBOL(reset_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
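/*
 * Note: the mm_count reference taken below keeps the mm_struct alive
 * while it remains our active_mm for lazy TLB; the matching mmdrop()
 * happens later (in finish_task_switch()) once we have been scheduled
 * away for the last time.
 */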
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mmput(mm);
}

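/*
 * Move one child from a dying parent to p->real_parent (or keep its
 * ptrace linkage if someone else is tracing it), then handle the
 * notification and process-group-orphan consequences.  Runs with
 * tasklist_lock write-locked.
 */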
static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/* If this is a threaded reparent there is no need to
	 * notify anyone that anything has happened.
	 */
	if (p->real_parent->group_leader == father->group_leader)
		return;

	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	if (!traced && p->exit_state == EXIT_ZOMBIE &&
	    p->exit_signal != -1 && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((task_pgrp(p) != task_pgrp(father)) &&
	    (task_session(p) == task_session(father))) {
		struct pid *pgrp = task_pgrp(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) &&
		    has_stopped_jobs(pgrp)) {
			__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static void
forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper(father);
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p, struct task_struct, sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent to the reaper; we are the real parent */
			p->real_parent = reaper;
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink (p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * If the ptraced child is a zombie with exit_signal == -1
		 * we must collect it before we exit, or it will remain
		 * a zombie forever, since we prevented it from self-reaping
		 * while it was being traced by us so that we could see it
		 * in wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p, struct task_struct, ptrace_list);
		p->real_parent = reaper;
		reparent_thread(p, father, 1);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;
	struct pid *pgrp;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING))
				recalc_sigpending_and_wake(t);
		spin_unlock_irq(&tsk->sighand->siglock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */
	t = tsk->real_parent;

	pgrp = task_pgrp(tsk);
	if ((task_pgrp(t) != pgrp) &&
	    (task_session(t) == task_session(tsk)) &&
	    will_become_orphaned_pgrp(pgrp, tsk) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    ( tsk->parent_exec_id != t->self_exec_id  ||
	      tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;


	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	if (thread_group_leader(tsk) &&
	    tsk->signal->notify_count < 0 &&
	    tsk->signal->group_exit_task)
		wake_up_process(tsk->signal->group_exit_task);

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
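/*
 * Under this config option the thread stack is zero-filled when it is
 * allocated, so scanning up from end_of_stack() to the first nonzero
 * word gives a low-water mark of how much stack was never touched.
 */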
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long *n = end_of_stack(current);
	unsigned long free;

	while (*n == 0)
		n++;
	free = (unsigned long)n - (unsigned long)end_of_stack(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

static inline void exit_child_reaper(struct task_struct *tsk)
{
	if (likely(tsk->group_leader != child_reaper(tsk)))
		return;

	panic("Attempted to kill init!");
}

fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	tsk->flags |= PF_EXITING;
	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		exit_child_reaper(tsk);
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
#endif
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	proc_exit_connector(tsk);
	exit_task_namespaces(tsk);
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

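/*
 * Note the shift below: the low byte of the user-supplied status
 * becomes bits 8-15 of tsk->exit_code, matching the status layout
 * that wait(2) reports (exit status in the high byte, termination
 * signal in the low byte).
 */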
asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

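/*
 * pid argument encoding, as used by do_wait() and sys_wait4():
 *	pid >  0	match only the child with this exact pid
 *	pid == 0	match any child in our own process group
 *	pid <  -1	match any child in process group -pid
 *	pid == -1	match any child
 */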
static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
	int err;

	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (delay_group_leader(p))
		return 2;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}

static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval, status, traced;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	/* traced means p->ptrace, but not vice versa */
	traced = (p->real_parent != p->parent);

	if (likely(!traced)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cgtime =
			cputime_add(psig->cgtime,
			cputime_add(p->gtime,
			cputime_add(sig->gtime,
				    sig->cgtime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a detached task, notify the parent.
		 * If it's still not detached after that, don't release
		 * it now.
		 */
		if (p->exit_signal != -1) {
			do_notify_parent(p, p->exit_signal);
			if (p->exit_signal != -1) {
				p->exit_state = EXIT_ZOMBIE;
				p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
			     int noreap, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state & TASK_TRACED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having
		 * successfully removed the process, and we have already
		 * released the lock. We cannot continue, since the "p"
		 * task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning. Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}


static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach.  It is waiting for the tasklist_lock,
	 * which it needs in order to switch the parent links, but it has
	 * already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}

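/*
 * Core of the wait*() family: scan our children (and, unless
 * __WNOTHREAD, the children of every thread sharing our signal state)
 * for one matching pid/options.  "flag" records that some child could
 * match later, so WNOHANG returns 0 rather than -ECHILD; otherwise we
 * sleep in TASK_INTERRUPTIBLE on signal->wait_chldexit and retry.
 */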
static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;
	int allowed, denied;

	add_wait_queue(&current->signal->wait_chldexit,&wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	allowed = denied = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p,&tsk->children) {
			p = list_entry(_p, struct task_struct, sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			if (unlikely(ret < 0)) {
				denied = ret;
				continue;
			}
			allowed = 1;

			switch (p->state) {
			case TASK_TRACED:
				/*
				 * When we hit the race with PTRACE_ATTACH,
				 * we will not report this child.  But the
				 * race means it has not yet been moved to
				 * our ptrace_children list, so we need to
				 * set the flag here to avoid a spurious ECHILD
				 * when the race happens with the only child.
				 */
				flag = 1;
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock.  */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
						ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
	if (unlikely(denied) && !allowed)
		retval = denied;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit,&wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

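/*
 * Userspace usage, sketched (assuming the libc waitid() wrapper):
 *
 *	siginfo_t info;
 *	if (waitid(P_PID, child, &info, WEXITED | WNOHANG) == 0 &&
 *	    info.si_pid != 0)
 *		... child's state is in info.si_code / info.si_status ...
 *
 * info.si_pid == 0 is the WNOHANG "nothing happened" case, produced by
 * the field-clearing at the end of do_wait() above.
 */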
asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif