/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

static void exit_mm(struct task_struct * tsk);

static inline int task_detached(struct task_struct *p)
{
	return p->exit_signal == -1;
}

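/*
 * Drop the task from the pid bookkeeping.  Called from __exit_signal()
 * with tasklist_lock write-locked and the sighand lock held; only a
 * thread group leader carries PGID/SID links and a tasks-list entry.
 */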
static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	list_del_init(&p->sibling);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, task_utime(tsk));
		sig->stime = cputime_add(sig->stime, task_stime(tsk));
		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		task_io_accounting_add(&sig->ioac, &tsk->ioac);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		__cleanup_signal(sig);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

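/*
 * Final teardown for a task that has reached EXIT_DEAD: drop its user
 * accounting and /proc entries, detach its signal state, and hand the
 * task_struct to RCU for freeing.  If we were the last non-leader
 * thread and the leader is a self-reaping zombie, loop around and
 * release the leader too.
 */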
void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	tracehook_prepare_release_task(p);
	atomic_dec(&p->user->processes);
	proc_flush_task(p);
	write_lock_irq(&tasklist_lock);
	tracehook_finish_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(task_detached(leader));
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = task_detached(leader);

		/*
		 * This maintains the invariant that release_task()
		 * only runs on a task in EXIT_DEAD, just for sanity.
		 */
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (!task_is_stopped(p))
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		 /* exit: our father is in a different pgrp than
		  * we are and we were the only connection outside.
		  */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * Various bits of task state, such as scheduling policy and priority,
 * may have been inherited from a user process, so we reset them to sane
 * values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to kthreadd */
	current->real_parent = current->parent = kthreadd_task;
	list_move_tail(&current->sibling, &current->real_parent->children);

	/* Set the exit signal to SIGCHLD so our new parent is notified on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

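/*
 * Move the calling thread group into the session and process group
 * identified by @pid.  The caller must hold tasklist_lock write-locked;
 * set_special_pids() below is the locked wrapper used by daemonize().
 */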
void __set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;
	pid_t nr = pid_nr(pid);

	if (task_session(curr) != pid) {
		change_pid(curr, PIDTYPE_SID, pid);
		set_task_session(curr, nr);
	}
	if (task_pgrp(curr) != pid) {
		change_pid(curr, PIDTYPE_PGID, pid);
		set_task_pgrp(curr, nr);
	}
}

static void set_special_pids(struct pid *pid)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(pid);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

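/*
 * Illustrative usage, not part of the original file: a kernel thread
 * that wants to be killable typically pairs daemonize() with
 * allow_signal().  The thread function below is a hypothetical sketch
 * (the names my_loop/do_work are made up):
 *
 *	static int my_loop(void *unused)
 *	{
 *		daemonize("my_loop");
 *		allow_signal(SIGKILL);
 *		while (!signal_pending(current)) {
 *			do_work();
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */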
int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages.  We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= (PF_NOFREEZE | PF_KTHREAD);

	if (current->nsproxy != &init_nsproxy) {
		get_nsproxy(&init_nsproxy);
		switch_task_namespaces(current, &init_nsproxy);
	}
	set_special_pids(&init_struct_pid);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);

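/*
 * Walk the open-fd bitmap word by word (each word covers __NFDBITS
 * descriptors) and close every file still installed in the table.
 */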
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		path_put(&fs->root);
		path_put(&fs->pwd);
		kmem_cache_free(fs_cachep, fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		put_fs_struct(fs);
	}
}

EXPORT_SYMBOL_GPL(exit_fs);

#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm; let's find a new owner for it.
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
	/*
	 * If there are other users of the mm and the owner (us) is exiting
	 * we need to find a new owner to take on the responsibility.
	 */
	if (atomic_read(&mm->mm_users) <= 1)
		return 0;
	if (mm->owner != p)
		return 0;
	return 1;
}

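/*
 * Search order for the new owner: our children first, then our
 * siblings, then the global task list.  If nothing is found, set
 * mm->owner to NULL so the cgroup callback can see the mm went
 * ownerless.
 */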
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	if (!mm_need_new_owner(mm, p))
		return;

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else. We should not get
	 * here often
	 */
	do_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;
	} while_each_thread(g, c);

	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL,
	 * so that subsystems can understand the callback and take action.
	 */
	down_write(&mm->mmap_sem);
	cgroup_mm_owner_callbacks(mm->owner, NULL);
	mm->owner = NULL;
	up_write(&mm->mmap_sem);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	cgroup_mm_owner_callbacks(mm->owner, c);
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;
		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
}

/*
 * Return nonzero if @parent's children should reap themselves.
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static int ignoring_children(struct task_struct *parent)
{
	int ret;
	struct sighand_struct *psig = parent->sighand;
	unsigned long flags;
	spin_lock_irqsave(&psig->siglock, flags);
	ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	       (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
	spin_unlock_irqrestore(&psig->siglock, flags);
	return ret;
}

/*
 * Detach all tasks we were using ptrace on.
 * Any that need to be release_task'd are put on the @dead list.
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
{
	struct task_struct *p, *n;
	int ign = -1;

	list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
		__ptrace_unlink(p);

		if (p->exit_state != EXIT_ZOMBIE)
			continue;

		/*
		 * If it's a zombie, our attachedness prevented normal
		 * parent notification or self-reaping.  Do notification
		 * now if it would have happened earlier.  If it should
		 * reap itself, add it to the @dead list.  We can't call
		 * release_task() here because we already hold tasklist_lock.
		 *
		 * If it's our own child, there is no notification to do.
		 * But if our normal children self-reap, then this child
		 * was prevented by ptrace and we must reap it now.
		 */
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, parent))
				do_notify_parent(p, p->exit_signal);
			else {
				if (ign < 0)
					ign = ignoring_children(parent);
				if (ign)
					p->exit_signal = -1;
			}
		}

		if (task_detached(p)) {
			/*
			 * Mark it as in the process of being reaped.
			 */
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}
}

/*
 * Finish up exit-time ptrace cleanup.
 *
 * Called without locks.
 */
static void ptrace_exit_finish(struct task_struct *parent,
			       struct list_head *dead)
{
	struct task_struct *p, *n;

	BUG_ON(!list_empty(&parent->ptraced));

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

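/*
 * Attach @p to its new parent's list of children and, when the move
 * crosses thread groups, restore SIGCHLD semantics and notify the new
 * parent of an already-zombie child.  Runs under write-locked
 * tasklist_lock.
 */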
static void reparent_thread(struct task_struct *p, struct task_struct *father)
{
	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	list_move_tail(&p->sibling, &p->real_parent->children);

	/* If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return;

	/* We don't want people slaying init.  */
	if (!task_detached(p))
		p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	if (!ptrace_reparented(p) &&
	    p->exit_state == EXIT_ZOMBIE &&
	    !task_detached(p) && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	kill_orphaned_pgrp(p, father);
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *thread;

	thread = father;
	while_each_thread(father, thread) {
		if (thread->flags & PF_EXITING)
			continue;
		if (unlikely(pid_ns->child_reaper == father))
			pid_ns->child_reaper = thread;
		return thread;
	}

	if (unlikely(pid_ns->child_reaper == father)) {
		write_unlock_irq(&tasklist_lock);
		if (unlikely(pid_ns == &init_pid_ns))
			panic("Attempted to kill init!");

		zap_pid_ns_processes(pid_ns);
		write_lock_irq(&tasklist_lock);
		/*
		 * We cannot clear ->child_reaper or leave it alone.
		 * There may be stealth EXIT_DEAD tasks on ->children,
		 * forget_original_parent() must move them somewhere.
		 */
		pid_ns->child_reaper = init_pid_ns.child_reaper;
	}

	return pid_ns->child_reaper;
}

static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper;
	LIST_HEAD(ptrace_dead);

	write_lock_irq(&tasklist_lock);
	reaper = find_new_reaper(father);
	/*
	 * First clean up ptrace if we were using it.
	 */
	ptrace_exit(father, &ptrace_dead);

	list_for_each_entry_safe(p, n, &father->children, sibling) {
		p->real_parent = reaper;
		if (p->parent == father) {
			BUG_ON(p->ptrace);
			p->parent = p->real_parent;
		}
		reparent_thread(p, father);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&father->children));

	ptrace_exit_finish(father, &ptrace_dead);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	int signal;
	void *cookie;

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id) &&
	    !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	signal = tracehook_notify_death(tsk, &cookie, group_dead);
	if (signal >= 0)
		signal = do_notify_parent(tsk, signal);

	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

	/* mt-exec, de_thread() is waiting for us */
	if (thread_group_leader(tsk) &&
	    tsk->signal->group_exit_task &&
	    tsk->signal->notify_count < 0)
		wake_up_process(tsk->signal->group_exit_task);

	write_unlock_irq(&tasklist_lock);

	tracehook_report_death(tsk, signal, cookie, group_dead);

	/* If the process is dead, release it - nobody will wait for it */
	if (signal == DEATH_REAP)
		release_task(tsk);
}

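/*
 * With CONFIG_DEBUG_STACK_USAGE the thread stack is zero-filled at
 * allocation, so scanning from end_of_stack() for the first nonzero
 * word gives the stack's high-water mark; new record lows of free
 * space are reported here at exit time.
 */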
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long *n = end_of_stack(current);
	unsigned long free;

	while (*n == 0)
		n++;
	free = (unsigned long)n - (unsigned long)end_of_stack(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

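/*
 * The heart of process death.  Ordering matters here: signals are
 * quiesced first (PF_EXITING), the mm is released before file and fs
 * state, and exit_notify() reparents our children and signals the
 * parent only after the resources a waiter may account for have been
 * handed back.
 */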
NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	tracehook_report_exit(&code);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */
	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
#endif
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cgroup_exit(tsk, 1);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	proc_exit_connector(tsk);
	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

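/*
 * For wait purposes a task's PGID/SID live on its thread group leader,
 * so task_pid_type() redirects non-PIDTYPE_PID lookups accordingly.
 */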
static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = NULL;
	if (type == PIDTYPE_PID)
		pid = task->pids[type].pid;
	else if (type < PIDTYPE_MAX)
		pid = task->group_leader->pids[type].pid;
	return pid;
}

static int eligible_child(enum pid_type type, struct pid *pid, int options,
			  struct task_struct *p)
{
	int err;

	if (type < PIDTYPE_MAX) {
		if (task_pid_type(p, type) != pid)
			return 0;
	}

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}

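/*
 * Layout of the raw exit code decoded below and in wait_task_zombie():
 * bits 0-6 hold the terminating signal (0 for a normal exit), bit 7 is
 * the core-dump flag, and bits 8-15 hold the exit status.  For example,
 * exit(1) yields 0x0100 (CLD_EXITED, status 1), while a SIGSEGV core
 * dump yields 0x8b (CLD_DUMPED, status SIGSEGV).
 */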
static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int options,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval, status, traced;
	pid_t pid = task_pid_vnr(p);

	if (!likely(options & WEXITED))
		return 0;

	if (unlikely(options & WNOWAIT)) {
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	traced = ptrace_reparented(p);

	if (likely(!traced)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cgtime =
			cputime_add(psig->cgtime,
			cputime_add(p->gtime,
			cputime_add(sig->gtime,
				    sig->cgtime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a detached task, notify the parent.
		 * If it's still not detached after that, don't release
		 * it now.
		 */
		if (!task_detached(p)) {
			do_notify_parent(p, p->exit_signal);
			if (!task_detached(p)) {
				p->exit_state = EXIT_ZOMBIE;
				p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(int ptrace, struct task_struct *p,
			     int options, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	if (!(options & WUNTRACED))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	if (unlikely(!task_is_stopped_or_traced(p)))
		goto unlock_sig;

	if (!ptrace && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		goto unlock_sig;

	exit_code = p->exit_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(options & WNOWAIT))
		p->exit_code = 0;

	uid = p->uid;
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);

	if (unlikely(options & WNOWAIT))
		return wait_noreap_copyout(p, pid, uid,
					   why, exit_code,
					   infop, ru);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int options,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!unlikely(options & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(options & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then *@notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct task_struct *parent, int ptrace,
			      struct task_struct *p, int *notask_error,
			      enum pid_type type, struct pid *pid, int options,
			      struct siginfo __user *infop,
			      int __user *stat_addr, struct rusage __user *ru)
{
	int ret = eligible_child(type, pid, options, p);
	if (!ret)
		return ret;

	if (unlikely(ret < 0)) {
		/*
		 * If we have not yet seen any eligible child,
		 * then let this error code replace -ECHILD.
		 * A permission error will give the user a clue
		 * to look for security policy problems, rather
		 * than for mysterious wait bugs.
		 */
		if (*notask_error)
			*notask_error = ret;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * This child is hidden by ptrace.
		 * We aren't allowed to see it now, but eventually we will.
		 */
		*notask_error = 0;
		return 0;
	}

	if (p->exit_state == EXIT_DEAD)
		return 0;

	/*
	 * We don't reap group leaders with subthreads.
	 */
	if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
		return wait_task_zombie(p, options, infop, stat_addr, ru);

	/*
	 * It's stopped or running now, so it might
	 * later continue, exit, or stop again.
	 */
	*notask_error = 0;

	if (task_is_stopped_or_traced(p))
		return wait_task_stopped(ptrace, p, options,
					 infop, stat_addr, ru);

	return wait_task_continued(p, options, infop, stat_addr, ru);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * *@notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct task_struct *tsk, int *notask_error,
			  enum pid_type type, struct pid *pid, int options,
			  struct siginfo __user *infop, int __user *stat_addr,
			  struct rusage __user *ru)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		/*
		 * Do not consider detached threads.
		 */
		if (!task_detached(p)) {
			int ret = wait_consider_task(tsk, 0, p, notask_error,
						     type, pid, options,
						     infop, stat_addr, ru);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int ptrace_do_wait(struct task_struct *tsk, int *notask_error,
			  enum pid_type type, struct pid *pid, int options,
			  struct siginfo __user *infop, int __user *stat_addr,
			  struct rusage __user *ru)
{
	struct task_struct *p;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	options |= WUNTRACED;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(tsk, 1, p, notask_error,
					     type, pid, options,
					     infop, stat_addr, ru);
		if (ret)
			return ret;
	}

	return 0;
}

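/*
 * Core wait loop: put ourselves on signal->wait_chldexit, scan every
 * thread in our group for matching children (including ptrace
 * children), and either return a result, bail out for WNOHANG, or
 * sleep until a child's state change wakes the queue and we rescan.
 */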
static long do_wait(enum pid_type type, struct pid *pid, int options,
		    struct siginfo __user *infop, int __user *stat_addr,
		    struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int retval;

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear @retval to zero if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	retval = -ECHILD;
	if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
		goto end;

	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		int tsk_result = do_wait_thread(tsk, &retval,
						type, pid, options,
						infop, stat_addr, ru);
		if (!tsk_result)
			tsk_result = ptrace_do_wait(tsk, &retval,
						    type, pid, options,
						    infop, stat_addr, ru);
		if (tsk_result) {
			/*
			 * tasklist_lock is unlocked and we have a final result.
			 */
			retval = tsk_result;
			goto end;
		}

		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);
	read_unlock(&tasklist_lock);

	if (!retval && !(options & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}

end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

asmlinkage long sys_waitid(int which, pid_t upid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);
	ret = do_wait(type, pid, options, infop, NULL, ru);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(5, ret, which, upid, infop, options, ru);
	return ret;
}

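/*
 * wait4() pid encoding: upid < -1 waits for process group -upid,
 * upid == -1 for any child, upid == 0 for the caller's own process
 * group, and upid > 0 for the single process with that pid.
 */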
asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_pid(task_pgrp(current));
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif