xref: /openbmc/linux/kernel/signal.c (revision 384740dc)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/tracehook.h>
26 #include <linux/capability.h>
27 #include <linux/freezer.h>
28 #include <linux/pid_namespace.h>
29 #include <linux/nsproxy.h>
30 
31 #include <asm/param.h>
32 #include <asm/uaccess.h>
33 #include <asm/unistd.h>
34 #include <asm/siginfo.h>
35 #include "audit.h"	/* audit_signal_info() */
36 
37 /*
38  * SLAB caches for signal bits.
39  */
40 
41 static struct kmem_cache *sigqueue_cachep;
42 
43 static void __user *sig_handler(struct task_struct *t, int sig)
44 {
45 	return t->sighand->action[sig - 1].sa.sa_handler;
46 }
47 
48 static int sig_handler_ignored(void __user *handler, int sig)
49 {
50 	/* Is it explicitly or implicitly ignored? */
51 	return handler == SIG_IGN ||
52 		(handler == SIG_DFL && sig_kernel_ignore(sig));
53 }
54 
55 static int sig_ignored(struct task_struct *t, int sig)
56 {
57 	void __user *handler;
58 
59 	/*
60 	 * Blocked signals are never ignored, since the
61 	 * signal handler may change by the time it is
62 	 * unblocked.
63 	 */
64 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
65 		return 0;
66 
67 	handler = sig_handler(t, sig);
68 	if (!sig_handler_ignored(handler, sig))
69 		return 0;
70 
71 	/*
72 	 * Tracers may want to know about even ignored signals.
73 	 */
74 	return !tracehook_consider_ignored_signal(t, sig, handler);
75 }
76 
77 /*
78  * Re-calculate pending state from the set of locally pending
79  * signals, globally pending signals, and blocked signals.
80  */
81 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
82 {
83 	unsigned long ready;
84 	long i;
85 
86 	switch (_NSIG_WORDS) {
87 	default:
88 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
89 			ready |= signal->sig[i] &~ blocked->sig[i];
90 		break;
91 
92 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
93 		ready |= signal->sig[2] &~ blocked->sig[2];
94 		ready |= signal->sig[1] &~ blocked->sig[1];
95 		ready |= signal->sig[0] &~ blocked->sig[0];
96 		break;
97 
98 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
99 		ready |= signal->sig[0] &~ blocked->sig[0];
100 		break;
101 
102 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
103 	}
104 	return ready != 0;
105 }
106 
107 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
108 
109 static int recalc_sigpending_tsk(struct task_struct *t)
110 {
111 	if (t->signal->group_stop_count > 0 ||
112 	    PENDING(&t->pending, &t->blocked) ||
113 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
114 		set_tsk_thread_flag(t, TIF_SIGPENDING);
115 		return 1;
116 	}
117 	/*
118 	 * We must never clear the flag in another thread, or in current
119 	 * when it's possible the current syscall is returning -ERESTART*.
120 	 * So we don't clear it here; it is only cleared by callers that know they safely can.
121 	 */
122 	return 0;
123 }
124 
125 /*
126  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
127  * This is superfluous when called on current; the wakeup is then a harmless no-op.
128  */
129 void recalc_sigpending_and_wake(struct task_struct *t)
130 {
131 	if (recalc_sigpending_tsk(t))
132 		signal_wake_up(t, 0);
133 }
134 
135 void recalc_sigpending(void)
136 {
137 	if (unlikely(tracehook_force_sigpending()))
138 		set_thread_flag(TIF_SIGPENDING);
139 	else if (!recalc_sigpending_tsk(current) && !freezing(current))
140 		clear_thread_flag(TIF_SIGPENDING);
141 
142 }
143 
144 /* Given the mask, find the first available signal that should be serviced. */
145 
146 int next_signal(struct sigpending *pending, sigset_t *mask)
147 {
148 	unsigned long i, *s, *m, x;
149 	int sig = 0;
150 
151 	s = pending->signal.sig;
152 	m = mask->sig;
153 	switch (_NSIG_WORDS) {
154 	default:
155 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
156 			if ((x = *s &~ *m) != 0) {
157 				sig = ffz(~x) + i*_NSIG_BPW + 1;
158 				break;
159 			}
160 		break;
161 
162 	case 2: if ((x = s[0] &~ m[0]) != 0)
163 			sig = 1;
164 		else if ((x = s[1] &~ m[1]) != 0)
165 			sig = _NSIG_BPW + 1;
166 		else
167 			break;
168 		sig += ffz(~x);
169 		break;
170 
171 	case 1: if ((x = *s &~ *m) != 0)
172 			sig = ffz(~x) + 1;
173 		break;
174 	}
175 
176 	return sig;
177 }
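
/*
 * For illustration: a pending and unblocked SIGQUIT (signal 3) sets bit 2
 * of word 0, so x has only bit 2 set, ffz(~x) == 2, and next_signal()
 * returns 2 + 0 * _NSIG_BPW + 1 == 3.  In general, signal sig is stored
 * as bit (sig - 1) % _NSIG_BPW of word (sig - 1) / _NSIG_BPW, which is
 * exactly what the arithmetic above reconstructs.
 */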
178 
179 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
180 					 int override_rlimit)
181 {
182 	struct sigqueue *q = NULL;
183 	struct user_struct *user;
184 
185 	/*
186 	 * In order to avoid problems with "switch_user()", we want to make
187 	 * sure that the compiler doesn't re-load "t->user"
188 	 */
189 	user = t->user;
190 	barrier();
191 	atomic_inc(&user->sigpending);
192 	if (override_rlimit ||
193 	    atomic_read(&user->sigpending) <=
194 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
195 		q = kmem_cache_alloc(sigqueue_cachep, flags);
196 	if (unlikely(q == NULL)) {
197 		atomic_dec(&user->sigpending);
198 	} else {
199 		INIT_LIST_HEAD(&q->list);
200 		q->flags = 0;
201 		q->user = get_uid(user);
202 	}
203 	return q;
204 }
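
/*
 * For illustration, the sigpending accounting above is what backs
 * RLIMIT_SIGPENDING as seen from user space.  A rough, hypothetical test
 * (not from this file) that keeps a real-time signal blocked so the queued
 * entries stay pending will eventually see sigqueue() fail with EAGAIN:
 *
 *	sigset_t set;
 *	union sigval val = { .sival_int = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGRTMIN);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	while (sigqueue(getpid(), SIGRTMIN, val) == 0)
 *		;
 *
 * The loop ends once the per-user pending-signal limit is reached and
 * __sigqueue_alloc() starts refusing allocations.
 */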
205 
206 static void __sigqueue_free(struct sigqueue *q)
207 {
208 	if (q->flags & SIGQUEUE_PREALLOC)
209 		return;
210 	atomic_dec(&q->user->sigpending);
211 	free_uid(q->user);
212 	kmem_cache_free(sigqueue_cachep, q);
213 }
214 
215 void flush_sigqueue(struct sigpending *queue)
216 {
217 	struct sigqueue *q;
218 
219 	sigemptyset(&queue->signal);
220 	while (!list_empty(&queue->list)) {
221 		q = list_entry(queue->list.next, struct sigqueue, list);
222 		list_del_init(&q->list);
223 		__sigqueue_free(q);
224 	}
225 }
226 
227 /*
228  * Flush all pending signals for a task.
229  */
230 void flush_signals(struct task_struct *t)
231 {
232 	unsigned long flags;
233 
234 	spin_lock_irqsave(&t->sighand->siglock, flags);
235 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
236 	flush_sigqueue(&t->pending);
237 	flush_sigqueue(&t->signal->shared_pending);
238 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
239 }
240 
241 static void __flush_itimer_signals(struct sigpending *pending)
242 {
243 	sigset_t signal, retain;
244 	struct sigqueue *q, *n;
245 
246 	signal = pending->signal;
247 	sigemptyset(&retain);
248 
249 	list_for_each_entry_safe(q, n, &pending->list, list) {
250 		int sig = q->info.si_signo;
251 
252 		if (likely(q->info.si_code != SI_TIMER)) {
253 			sigaddset(&retain, sig);
254 		} else {
255 			sigdelset(&signal, sig);
256 			list_del_init(&q->list);
257 			__sigqueue_free(q);
258 		}
259 	}
260 
261 	sigorsets(&pending->signal, &signal, &retain);
262 }
263 
264 void flush_itimer_signals(void)
265 {
266 	struct task_struct *tsk = current;
267 	unsigned long flags;
268 
269 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
270 	__flush_itimer_signals(&tsk->pending);
271 	__flush_itimer_signals(&tsk->signal->shared_pending);
272 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
273 }
274 
275 void ignore_signals(struct task_struct *t)
276 {
277 	int i;
278 
279 	for (i = 0; i < _NSIG; ++i)
280 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
281 
282 	flush_signals(t);
283 }
284 
285 /*
286  * Flush all handlers for a task.
287  */
288 
289 void
290 flush_signal_handlers(struct task_struct *t, int force_default)
291 {
292 	int i;
293 	struct k_sigaction *ka = &t->sighand->action[0];
294 	for (i = _NSIG ; i != 0 ; i--) {
295 		if (force_default || ka->sa.sa_handler != SIG_IGN)
296 			ka->sa.sa_handler = SIG_DFL;
297 		ka->sa.sa_flags = 0;
298 		sigemptyset(&ka->sa.sa_mask);
299 		ka++;
300 	}
301 }
302 
303 int unhandled_signal(struct task_struct *tsk, int sig)
304 {
305 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
306 	if (is_global_init(tsk))
307 		return 1;
308 	if (handler != SIG_IGN && handler != SIG_DFL)
309 		return 0;
310 	return !tracehook_consider_fatal_signal(tsk, sig, handler);
311 }
312 
313 
314 /* Notify the system that a driver wants to block all signals for this
315  * process, and wants to be notified if any signals at all were to be
316  * sent/acted upon.  If the notifier routine returns non-zero, then the
317  * signal will be acted upon after all.  If the notifier routine returns 0,
318  * then the signal will be blocked.  Only one block per process is
319  * allowed.  priv is a pointer to private data that the notifier routine
320  * can use to determine if the signal should be blocked or not.  */
321 
322 void
323 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
324 {
325 	unsigned long flags;
326 
327 	spin_lock_irqsave(&current->sighand->siglock, flags);
328 	current->notifier_mask = mask;
329 	current->notifier_data = priv;
330 	current->notifier = notifier;
331 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
332 }
333 
334 /* Notify the system that blocking has ended. */
335 
336 void
337 unblock_all_signals(void)
338 {
339 	unsigned long flags;
340 
341 	spin_lock_irqsave(&current->sighand->siglock, flags);
342 	current->notifier = NULL;
343 	current->notifier_data = NULL;
344 	recalc_sigpending();
345 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
346 }
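
/*
 * For illustration, a driver would typically use the pair above like this
 * (my_notifier, my_dev and allow_signals are made-up names, not kernel API):
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *
 *		return dev->allow_signals;
 *	}
 *
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	...
 *	unblock_all_signals();
 *
 * A non-zero return from the notifier means the signal is acted upon after
 * all; returning 0 keeps it blocked, as described above.
 */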
347 
348 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
349 {
350 	struct sigqueue *q, *first = NULL;
351 
352 	/*
353 	 * Collect the siginfo appropriate to this signal.  Check if
354 	 * there is another siginfo for the same signal.
355 	 */
356 	list_for_each_entry(q, &list->list, list) {
357 		if (q->info.si_signo == sig) {
358 			if (first)
359 				goto still_pending;
360 			first = q;
361 		}
362 	}
363 
364 	sigdelset(&list->signal, sig);
365 
366 	if (first) {
367 still_pending:
368 		list_del_init(&first->list);
369 		copy_siginfo(info, &first->info);
370 		__sigqueue_free(first);
371 	} else {
372 		/* Ok, it wasn't in the queue.  This must be
373 		   a fast-pathed signal or we must have been
374 		   out of queue space.  So zero out the info.
375 		 */
376 		info->si_signo = sig;
377 		info->si_errno = 0;
378 		info->si_code = 0;
379 		info->si_pid = 0;
380 		info->si_uid = 0;
381 	}
382 }
383 
384 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
385 			siginfo_t *info)
386 {
387 	int sig = next_signal(pending, mask);
388 
389 	if (sig) {
390 		if (current->notifier) {
391 			if (sigismember(current->notifier_mask, sig)) {
392 				if (!(current->notifier)(current->notifier_data)) {
393 					clear_thread_flag(TIF_SIGPENDING);
394 					return 0;
395 				}
396 			}
397 		}
398 
399 		collect_signal(sig, pending, info);
400 	}
401 
402 	return sig;
403 }
404 
405 /*
406  * Dequeue a signal and return the element to the caller, which is
407  * expected to free it.
408  *
409  * All callers have to hold the siglock.
410  */
411 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
412 {
413 	int signr;
414 
415 	/* We only dequeue private signals from ourselves; we don't let
416 	 * signalfd steal them.
417 	 */
418 	signr = __dequeue_signal(&tsk->pending, mask, info);
419 	if (!signr) {
420 		signr = __dequeue_signal(&tsk->signal->shared_pending,
421 					 mask, info);
422 		/*
423 		 * itimer signal?
424 		 *
425 		 * itimers are process-shared and we restart periodic
426 		 * itimers in the signal delivery path to prevent DoS
427 		 * attacks in the high resolution timer case. This is
428 		 * compliant with the old way of self-restarting
429 		 * itimers, as SIGALRM is a legacy signal and is only
430 		 * queued once. Changing the restart behaviour to
431 		 * restart the timer in the signal dequeue path also
432 		 * reduces the timer noise on heavily loaded !highres
433 		 * systems.
434 		 */
435 		if (unlikely(signr == SIGALRM)) {
436 			struct hrtimer *tmr = &tsk->signal->real_timer;
437 
438 			if (!hrtimer_is_queued(tmr) &&
439 			    tsk->signal->it_real_incr.tv64 != 0) {
440 				hrtimer_forward(tmr, tmr->base->get_time(),
441 						tsk->signal->it_real_incr);
442 				hrtimer_restart(tmr);
443 			}
444 		}
445 	}
446 
447 	recalc_sigpending();
448 	if (!signr)
449 		return 0;
450 
451 	if (unlikely(sig_kernel_stop(signr))) {
452 		/*
453 		 * Set a marker that we have dequeued a stop signal.  Our
454 		 * caller might release the siglock and then the pending
455 		 * stop signal it is about to process is no longer in the
456 		 * pending bitmasks, but must still be cleared by a SIGCONT
457 		 * (and overruled by a SIGKILL).  So those cases clear this
458 		 * shared flag after we've set it.  Note that this flag may
459 		 * remain set after the signal we return is ignored or
460 		 * handled.  That doesn't matter because its only purpose
461 		 * is to alert stop-signal processing code when another
462 		 * processor has come along and cleared the flag.
463 		 */
464 		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
465 	}
466 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
467 		/*
468 		 * Release the siglock to ensure proper locking order
469 		 * of timer locks outside of siglocks.  Note, we leave
470 		 * irqs disabled here, since the posix-timers code is
471 		 * about to disable them again anyway.
472 		 */
473 		spin_unlock(&tsk->sighand->siglock);
474 		do_schedule_next_timer(info);
475 		spin_lock(&tsk->sighand->siglock);
476 	}
477 	return signr;
478 }
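
/*
 * For illustration, callers follow the locking rule above roughly as
 * (signr and info are placeholders):
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * which is essentially the pattern get_signal_to_deliver() uses further
 * down in this file.
 */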
479 
480 /*
481  * Tell a process that it has a new active signal.
482  *
483  * NOTE! We rely on the previous spin_lock to
484  * lock interrupts for us! We can only be called with
485  * "siglock" held, and local interrupts must
486  * have been disabled when that got acquired!
487  *
488  * No need to set need_resched since signal event passing
489  * goes through ->blocked
490  */
491 void signal_wake_up(struct task_struct *t, int resume)
492 {
493 	unsigned int mask;
494 
495 	set_tsk_thread_flag(t, TIF_SIGPENDING);
496 
497 	/*
498 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
499 	 * case. We don't check t->state here because there is a race with it
500 	 * executing on another processor and just now entering stopped state.
501 	 * By using wake_up_state, we ensure the process will wake up and
502 	 * handle its death signal.
503 	 */
504 	mask = TASK_INTERRUPTIBLE;
505 	if (resume)
506 		mask |= TASK_WAKEKILL;
507 	if (!wake_up_state(t, mask))
508 		kick_process(t);
509 }
510 
511 /*
512  * Remove signals in mask from the pending set and queue.
513  * Returns 1 if any signals were found.
514  *
515  * All callers must be holding the siglock.
516  *
517  * This version takes a sigset mask and looks at all signals,
518  * not just those in the first mask word.
519  */
520 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
521 {
522 	struct sigqueue *q, *n;
523 	sigset_t m;
524 
525 	sigandsets(&m, mask, &s->signal);
526 	if (sigisemptyset(&m))
527 		return 0;
528 
529 	signandsets(&s->signal, &s->signal, mask);
530 	list_for_each_entry_safe(q, n, &s->list, list) {
531 		if (sigismember(mask, q->info.si_signo)) {
532 			list_del_init(&q->list);
533 			__sigqueue_free(q);
534 		}
535 	}
536 	return 1;
537 }
538 /*
539  * Remove signals in mask from the pending set and queue.
540  * Returns 1 if any signals were found.
541  *
542  * All callers must be holding the siglock.
543  */
544 static int rm_from_queue(unsigned long mask, struct sigpending *s)
545 {
546 	struct sigqueue *q, *n;
547 
548 	if (!sigtestsetmask(&s->signal, mask))
549 		return 0;
550 
551 	sigdelsetmask(&s->signal, mask);
552 	list_for_each_entry_safe(q, n, &s->list, list) {
553 		if (q->info.si_signo < SIGRTMIN &&
554 		    (mask & sigmask(q->info.si_signo))) {
555 			list_del_init(&q->list);
556 			__sigqueue_free(q);
557 		}
558 	}
559 	return 1;
560 }
561 
562 /*
563  * Bad permissions for sending the signal
564  */
565 static int check_kill_permission(int sig, struct siginfo *info,
566 				 struct task_struct *t)
567 {
568 	struct pid *sid;
569 	int error;
570 
571 	if (!valid_signal(sig))
572 		return -EINVAL;
573 
574 	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
575 		return 0;
576 
577 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
578 	if (error)
579 		return error;
580 
581 	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
582 	    (current->uid  ^ t->suid) && (current->uid  ^ t->uid) &&
583 	    !capable(CAP_KILL)) {
584 		switch (sig) {
585 		case SIGCONT:
586 			sid = task_session(t);
587 			/*
588 			 * We don't return the error if sid == NULL. The
589 			 * task was unhashed; the caller must notice this.
590 			 */
591 			if (!sid || sid == task_session(current))
592 				break;
593 		default:
594 			return -EPERM;
595 		}
596 	}
597 
598 	return security_task_kill(t, info, sig, 0);
599 }
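
/*
 * For illustration, the xor chain above evaluates to zero as soon as any
 * one of the sender's euid/uid matches the target's suid/uid.  E.g. with
 * current->euid == 1000 and t->uid == 1000, the term
 * (current->euid ^ t->uid) is 0, the whole && chain short-circuits, the
 * SIGCONT/-EPERM switch is skipped, and the call falls through to
 * security_task_kill().
 */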
600 
601 /*
602  * Handle magic process-wide effects of stop/continue signals. Unlike
603  * the signal actions, these happen immediately at signal-generation
604  * time regardless of blocking, ignoring, or handling.  This does the
605  * actual continuing for SIGCONT, but not the actual stopping for stop
606  * signals. The process stop is done as a signal action for SIG_DFL.
607  *
608  * Returns true if the signal should be actually delivered, otherwise
609  * it should be dropped.
610  */
611 static int prepare_signal(int sig, struct task_struct *p)
612 {
613 	struct signal_struct *signal = p->signal;
614 	struct task_struct *t;
615 
616 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
617 		/*
618 		 * The process is in the middle of dying, nothing to do.
619 		 */
620 	} else if (sig_kernel_stop(sig)) {
621 		/*
622 		 * This is a stop signal.  Remove SIGCONT from all queues.
623 		 */
624 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
625 		t = p;
626 		do {
627 			rm_from_queue(sigmask(SIGCONT), &t->pending);
628 		} while_each_thread(p, t);
629 	} else if (sig == SIGCONT) {
630 		unsigned int why;
631 		/*
632 		 * Remove all stop signals from all queues,
633 		 * and wake all threads.
634 		 */
635 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
636 		t = p;
637 		do {
638 			unsigned int state;
639 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
640 			/*
641 			 * If there is a handler for SIGCONT, we must make
642 			 * sure that no thread returns to user mode before
643 			 * we post the signal, in case it was the only
644 			 * thread eligible to run the signal handler--then
645 			 * it must not do anything between resuming and
646 			 * running the handler.  With the TIF_SIGPENDING
647 			 * flag set, the thread will pause and acquire the
648 			 * siglock that we hold now and until we've queued
649 			 * the pending signal.
650 			 *
651 			 * Wake up the stopped thread _after_ setting
652 			 * TIF_SIGPENDING
653 			 */
654 			state = __TASK_STOPPED;
655 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
656 				set_tsk_thread_flag(t, TIF_SIGPENDING);
657 				state |= TASK_INTERRUPTIBLE;
658 			}
659 			wake_up_state(t, state);
660 		} while_each_thread(p, t);
661 
662 		/*
663 		 * Notify the parent with CLD_CONTINUED if we were stopped.
664 		 *
665 		 * If we were in the middle of a group stop, we pretend it
666 		 * was already finished, and then continued. Since SIGCHLD
667 		 * doesn't queue we report only CLD_STOPPED, as if the next
668 		 * CLD_CONTINUED was dropped.
669 		 */
670 		why = 0;
671 		if (signal->flags & SIGNAL_STOP_STOPPED)
672 			why |= SIGNAL_CLD_CONTINUED;
673 		else if (signal->group_stop_count)
674 			why |= SIGNAL_CLD_STOPPED;
675 
676 		if (why) {
677 			/*
678 			 * The first thread which returns from finish_stop()
679 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
680 			 * notify its parent. See get_signal_to_deliver().
681 			 */
682 			signal->flags = why | SIGNAL_STOP_CONTINUED;
683 			signal->group_stop_count = 0;
684 			signal->group_exit_code = 0;
685 		} else {
686 			/*
687 			 * We are not stopped, but there could be a stop
688 			 * signal in the middle of being processed after
689 			 * being removed from the queue.  Clear that too.
690 			 */
691 			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
692 		}
693 	}
694 
695 	return !sig_ignored(p, sig);
696 }
697 
698 /*
699  * Test if P wants to take SIG.  After we've checked all threads with this,
700  * it's equivalent to finding no threads not blocking SIG.  Any threads not
701  * blocking SIG were ruled out because they are not running and already
702  * have pending signals.  Such threads will dequeue from the shared queue
703  * as soon as they're available, so putting the signal on the shared queue
704  * will be equivalent to sending it to one such thread.
705  */
706 static inline int wants_signal(int sig, struct task_struct *p)
707 {
708 	if (sigismember(&p->blocked, sig))
709 		return 0;
710 	if (p->flags & PF_EXITING)
711 		return 0;
712 	if (sig == SIGKILL)
713 		return 1;
714 	if (task_is_stopped_or_traced(p))
715 		return 0;
716 	return task_curr(p) || !signal_pending(p);
717 }
718 
719 static void complete_signal(int sig, struct task_struct *p, int group)
720 {
721 	struct signal_struct *signal = p->signal;
722 	struct task_struct *t;
723 
724 	/*
725 	 * Now find a thread we can wake up to take the signal off the queue.
726 	 *
727 	 * If the main thread wants the signal, it gets first crack.
728 	 * Probably the least surprising to the average bear.
729 	 */
730 	if (wants_signal(sig, p))
731 		t = p;
732 	else if (!group || thread_group_empty(p))
733 		/*
734 		 * There is just one thread and it does not need to be woken.
735 		 * It will dequeue unblocked signals before it runs again.
736 		 */
737 		return;
738 	else {
739 		/*
740 		 * Otherwise try to find a suitable thread.
741 		 */
742 		t = signal->curr_target;
743 		while (!wants_signal(sig, t)) {
744 			t = next_thread(t);
745 			if (t == signal->curr_target)
746 				/*
747 				 * No thread needs to be woken.
748 				 * Any eligible threads will see
749 				 * the signal in the queue soon.
750 				 */
751 				return;
752 		}
753 		signal->curr_target = t;
754 	}
755 
756 	/*
757 	 * Found a killable thread.  If the signal will be fatal,
758 	 * then start taking the whole group down immediately.
759 	 */
760 	if (sig_fatal(p, sig) &&
761 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
762 	    !sigismember(&t->real_blocked, sig) &&
763 	    (sig == SIGKILL ||
764 	     !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
765 		/*
766 		 * This signal will be fatal to the whole group.
767 		 */
768 		if (!sig_kernel_coredump(sig)) {
769 			/*
770 			 * Start a group exit and wake everybody up.
771 			 * This way we don't have other threads
772 			 * running and doing things after a slower
773 			 * thread has the fatal signal pending.
774 			 */
775 			signal->flags = SIGNAL_GROUP_EXIT;
776 			signal->group_exit_code = sig;
777 			signal->group_stop_count = 0;
778 			t = p;
779 			do {
780 				sigaddset(&t->pending.signal, SIGKILL);
781 				signal_wake_up(t, 1);
782 			} while_each_thread(p, t);
783 			return;
784 		}
785 	}
786 
787 	/*
788 	 * The signal is already in the shared-pending queue.
789 	 * Tell the chosen thread to wake up and dequeue it.
790 	 */
791 	signal_wake_up(t, sig == SIGKILL);
792 	return;
793 }
794 
795 static inline int legacy_queue(struct sigpending *signals, int sig)
796 {
797 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
798 }
799 
800 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
801 			int group)
802 {
803 	struct sigpending *pending;
804 	struct sigqueue *q;
805 
806 	assert_spin_locked(&t->sighand->siglock);
807 	if (!prepare_signal(sig, t))
808 		return 0;
809 
810 	pending = group ? &t->signal->shared_pending : &t->pending;
811 	/*
812 	 * Short-circuit ignored signals and support queuing
813 	 * exactly one non-rt signal, so that we can get more
814 	 * detailed information about the cause of the signal.
815 	 */
816 	if (legacy_queue(pending, sig))
817 		return 0;
818 	/*
819 	 * fast-pathed signals for kernel-internal things like SIGSTOP
820 	 * or SIGKILL.
821 	 */
822 	if (info == SEND_SIG_FORCED)
823 		goto out_set;
824 
825 	/* Real-time signals must be queued if sent by sigqueue, or
826 	   some other real-time mechanism.  It is implementation
827 	   defined whether kill() does so.  We attempt to do so, on
828 	   the principle of least surprise, but since kill is not
829 	   allowed to fail with EAGAIN when low on memory we just
830 	   make sure at least one signal gets delivered and don't
831 	   pass on the info struct.  */
832 
833 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
834 					     (is_si_special(info) ||
835 					      info->si_code >= 0)));
836 	if (q) {
837 		list_add_tail(&q->list, &pending->list);
838 		switch ((unsigned long) info) {
839 		case (unsigned long) SEND_SIG_NOINFO:
840 			q->info.si_signo = sig;
841 			q->info.si_errno = 0;
842 			q->info.si_code = SI_USER;
843 			q->info.si_pid = task_pid_vnr(current);
844 			q->info.si_uid = current->uid;
845 			break;
846 		case (unsigned long) SEND_SIG_PRIV:
847 			q->info.si_signo = sig;
848 			q->info.si_errno = 0;
849 			q->info.si_code = SI_KERNEL;
850 			q->info.si_pid = 0;
851 			q->info.si_uid = 0;
852 			break;
853 		default:
854 			copy_siginfo(&q->info, info);
855 			break;
856 		}
857 	} else if (!is_si_special(info)) {
858 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
859 		/*
860 		 * Queue overflow, abort.  We may abort if the signal was rt
861 		 * and sent by user using something other than kill().
862 		 */
863 			return -EAGAIN;
864 	}
865 
866 out_set:
867 	signalfd_notify(t, sig);
868 	sigaddset(&pending->signal, sig);
869 	complete_signal(sig, t, group);
870 	return 0;
871 }
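
/*
 * For illustration of legacy_queue(): sending SIGUSR1 to a task twice
 * before it dequeues the first one results in a single delivery, because
 * the second send_signal() finds the bit already set in pending->signal
 * and returns 0.  Sending a real-time signal such as SIGRTMIN twice queues
 * two separate sigqueue entries, giving the POSIX queueing semantics the
 * comments above describe.
 */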
872 
873 int print_fatal_signals;
874 
875 static void print_fatal_signal(struct pt_regs *regs, int signr)
876 {
877 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
878 		current->comm, task_pid_nr(current), signr);
879 
880 #if defined(__i386__) && !defined(__arch_um__)
881 	printk("code at %08lx: ", regs->ip);
882 	{
883 		int i;
884 		for (i = 0; i < 16; i++) {
885 			unsigned char insn;
886 
887 			__get_user(insn, (unsigned char *)(regs->ip + i));
888 			printk("%02x ", insn);
889 		}
890 	}
891 #endif
892 	printk("\n");
893 	show_regs(regs);
894 }
895 
896 static int __init setup_print_fatal_signals(char *str)
897 {
898 	get_option (&str, &print_fatal_signals);
899 
900 	return 1;
901 }
902 
903 __setup("print-fatal-signals=", setup_print_fatal_signals);
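
/*
 * For illustration: booting with "print-fatal-signals=1" on the kernel
 * command line enables print_fatal_signal() above, which then logs a line
 * of the form
 *
 *	app/1234: potentially unexpected fatal signal 11.
 *
 * followed by the instruction bytes at the faulting ip (on x86-32) and a
 * register dump, for every fatal core-dumping signal.
 */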
904 
905 int
906 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
907 {
908 	return send_signal(sig, info, p, 1);
909 }
910 
911 static int
912 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
913 {
914 	return send_signal(sig, info, t, 0);
915 }
916 
917 /*
918  * Force a signal that the process can't ignore: if necessary
919  * we unblock the signal and change any SIG_IGN to SIG_DFL.
920  *
921  * Note: If we unblock the signal, we always reset it to SIG_DFL,
922  * since we do not want to have a signal handler that was blocked
923  * be invoked when user space had explicitly blocked it.
924  *
925  * We don't want to have recursive SIGSEGV's etc, for example,
926  * that is why we also clear SIGNAL_UNKILLABLE.
927  */
928 int
929 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
930 {
931 	unsigned long int flags;
932 	int ret, blocked, ignored;
933 	struct k_sigaction *action;
934 
935 	spin_lock_irqsave(&t->sighand->siglock, flags);
936 	action = &t->sighand->action[sig-1];
937 	ignored = action->sa.sa_handler == SIG_IGN;
938 	blocked = sigismember(&t->blocked, sig);
939 	if (blocked || ignored) {
940 		action->sa.sa_handler = SIG_DFL;
941 		if (blocked) {
942 			sigdelset(&t->blocked, sig);
943 			recalc_sigpending_and_wake(t);
944 		}
945 	}
946 	if (action->sa.sa_handler == SIG_DFL)
947 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
948 	ret = specific_send_sig_info(sig, info, t);
949 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
950 
951 	return ret;
952 }
953 
954 void
955 force_sig_specific(int sig, struct task_struct *t)
956 {
957 	force_sig_info(sig, SEND_SIG_FORCED, t);
958 }
959 
960 /*
961  * Nuke all other threads in the group.
962  */
963 void zap_other_threads(struct task_struct *p)
964 {
965 	struct task_struct *t;
966 
967 	p->signal->group_stop_count = 0;
968 
969 	for (t = next_thread(p); t != p; t = next_thread(t)) {
970 		/*
971 		 * Don't bother with already dead threads
972 		 */
973 		if (t->exit_state)
974 			continue;
975 
976 		/* SIGKILL will be handled before any pending SIGSTOP */
977 		sigaddset(&t->pending.signal, SIGKILL);
978 		signal_wake_up(t, 1);
979 	}
980 }
981 
982 int __fatal_signal_pending(struct task_struct *tsk)
983 {
984 	return sigismember(&tsk->pending.signal, SIGKILL);
985 }
986 EXPORT_SYMBOL(__fatal_signal_pending);
987 
988 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
989 {
990 	struct sighand_struct *sighand;
991 
992 	rcu_read_lock();
993 	for (;;) {
994 		sighand = rcu_dereference(tsk->sighand);
995 		if (unlikely(sighand == NULL))
996 			break;
997 
998 		spin_lock_irqsave(&sighand->siglock, *flags);
999 		if (likely(sighand == tsk->sighand))
1000 			break;
1001 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1002 	}
1003 	rcu_read_unlock();
1004 
1005 	return sighand;
1006 }
1007 
1008 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1009 {
1010 	unsigned long flags;
1011 	int ret;
1012 
1013 	ret = check_kill_permission(sig, info, p);
1014 
1015 	if (!ret && sig) {
1016 		ret = -ESRCH;
1017 		if (lock_task_sighand(p, &flags)) {
1018 			ret = __group_send_sig_info(sig, info, p);
1019 			unlock_task_sighand(p, &flags);
1020 		}
1021 	}
1022 
1023 	return ret;
1024 }
1025 
1026 /*
1027  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1028  * control characters do (^C, ^Z, etc.).
1029  */
1030 
1031 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1032 {
1033 	struct task_struct *p = NULL;
1034 	int retval, success;
1035 
1036 	success = 0;
1037 	retval = -ESRCH;
1038 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1039 		int err = group_send_sig_info(sig, info, p);
1040 		success |= !err;
1041 		retval = err;
1042 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1043 	return success ? 0 : retval;
1044 }
1045 
1046 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1047 {
1048 	int error = -ESRCH;
1049 	struct task_struct *p;
1050 
1051 	rcu_read_lock();
1052 retry:
1053 	p = pid_task(pid, PIDTYPE_PID);
1054 	if (p) {
1055 		error = group_send_sig_info(sig, info, p);
1056 		if (unlikely(error == -ESRCH))
1057 			/*
1058 			 * The task was unhashed in between, try again.
1059 			 * If it is dead, pid_task() will return NULL,
1060 			 * if we race with de_thread() it will find the
1061 			 * new leader.
1062 			 */
1063 			goto retry;
1064 	}
1065 	rcu_read_unlock();
1066 
1067 	return error;
1068 }
1069 
1070 int
1071 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1072 {
1073 	int error;
1074 	rcu_read_lock();
1075 	error = kill_pid_info(sig, info, find_vpid(pid));
1076 	rcu_read_unlock();
1077 	return error;
1078 }
1079 
1080 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1081 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1082 		      uid_t uid, uid_t euid, u32 secid)
1083 {
1084 	int ret = -EINVAL;
1085 	struct task_struct *p;
1086 
1087 	if (!valid_signal(sig))
1088 		return ret;
1089 
1090 	read_lock(&tasklist_lock);
1091 	p = pid_task(pid, PIDTYPE_PID);
1092 	if (!p) {
1093 		ret = -ESRCH;
1094 		goto out_unlock;
1095 	}
1096 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1097 	    && (euid != p->suid) && (euid != p->uid)
1098 	    && (uid != p->suid) && (uid != p->uid)) {
1099 		ret = -EPERM;
1100 		goto out_unlock;
1101 	}
1102 	ret = security_task_kill(p, info, sig, secid);
1103 	if (ret)
1104 		goto out_unlock;
1105 	if (sig && p->sighand) {
1106 		unsigned long flags;
1107 		spin_lock_irqsave(&p->sighand->siglock, flags);
1108 		ret = __group_send_sig_info(sig, info, p);
1109 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1110 	}
1111 out_unlock:
1112 	read_unlock(&tasklist_lock);
1113 	return ret;
1114 }
1115 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1116 
1117 /*
1118  * kill_something_info() interprets pid in interesting ways just like kill(2).
1119  *
1120  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1121  * is probably wrong.  Should make it like BSD or SYSV.
1122  */
1123 
1124 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1125 {
1126 	int ret;
1127 
1128 	if (pid > 0) {
1129 		rcu_read_lock();
1130 		ret = kill_pid_info(sig, info, find_vpid(pid));
1131 		rcu_read_unlock();
1132 		return ret;
1133 	}
1134 
1135 	read_lock(&tasklist_lock);
1136 	if (pid != -1) {
1137 		ret = __kill_pgrp_info(sig, info,
1138 				pid ? find_vpid(-pid) : task_pgrp(current));
1139 	} else {
1140 		int retval = 0, count = 0;
1141 		struct task_struct * p;
1142 
1143 		for_each_process(p) {
1144 			if (p->pid > 1 && !same_thread_group(p, current)) {
1145 				int err = group_send_sig_info(sig, info, p);
1146 				++count;
1147 				if (err != -EPERM)
1148 					retval = err;
1149 			}
1150 		}
1151 		ret = count ? retval : -ESRCH;
1152 	}
1153 	read_unlock(&tasklist_lock);
1154 
1155 	return ret;
1156 }
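
/*
 * For illustration, the pid argument above maps to targets the same way
 * kill(2) documents it:
 *
 *	pid > 0		the single process with that pid
 *	pid == 0	every process in the caller's process group
 *	pid < -1	every process in the process group -pid
 *	pid == -1	every process the caller may signal, except
 *			init and the caller's own thread group
 */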
1157 
1158 /*
1159  * These are for backward compatibility with the rest of the kernel source.
1160  */
1161 
1162 /*
1163  * The caller must ensure the task can't exit.
1164  */
1165 int
1166 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1167 {
1168 	int ret;
1169 	unsigned long flags;
1170 
1171 	/*
1172 	 * Make sure legacy kernel users don't send in bad values
1173 	 * (normal paths check this in check_kill_permission).
1174 	 */
1175 	if (!valid_signal(sig))
1176 		return -EINVAL;
1177 
1178 	spin_lock_irqsave(&p->sighand->siglock, flags);
1179 	ret = specific_send_sig_info(sig, info, p);
1180 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1181 	return ret;
1182 }
1183 
1184 #define __si_special(priv) \
1185 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1186 
1187 int
1188 send_sig(int sig, struct task_struct *p, int priv)
1189 {
1190 	return send_sig_info(sig, __si_special(priv), p);
1191 }
1192 
1193 void
1194 force_sig(int sig, struct task_struct *p)
1195 {
1196 	force_sig_info(sig, SEND_SIG_PRIV, p);
1197 }
1198 
1199 /*
1200  * When things go south during signal handling, we
1201  * will force a SIGSEGV. And if the signal that caused
1202  * the problem was already a SIGSEGV, we'll want to
1203  * make sure we don't even try to deliver the signal.
1204  */
1205 int
1206 force_sigsegv(int sig, struct task_struct *p)
1207 {
1208 	if (sig == SIGSEGV) {
1209 		unsigned long flags;
1210 		spin_lock_irqsave(&p->sighand->siglock, flags);
1211 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1212 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1213 	}
1214 	force_sig(SIGSEGV, p);
1215 	return 0;
1216 }
1217 
1218 int kill_pgrp(struct pid *pid, int sig, int priv)
1219 {
1220 	int ret;
1221 
1222 	read_lock(&tasklist_lock);
1223 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1224 	read_unlock(&tasklist_lock);
1225 
1226 	return ret;
1227 }
1228 EXPORT_SYMBOL(kill_pgrp);
1229 
1230 int kill_pid(struct pid *pid, int sig, int priv)
1231 {
1232 	return kill_pid_info(sig, __si_special(priv), pid);
1233 }
1234 EXPORT_SYMBOL(kill_pid);
1235 
1236 /*
1237  * These functions support sending signals using preallocated sigqueue
1238  * structures.  This is needed "because realtime applications cannot
1239  * afford to lose notifications of asynchronous events, like timer
1240  * expirations or I/O completions".  In the case of POSIX timers
1241  * we allocate the sigqueue structure at timer_create() time.  If this
1242  * allocation fails we are able to report the failure to the application
1243  * with an EAGAIN error.
1244  */
1245 
1246 struct sigqueue *sigqueue_alloc(void)
1247 {
1248 	struct sigqueue *q;
1249 
1250 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1251 		q->flags |= SIGQUEUE_PREALLOC;
1252 	return q;
1253 }
1254 
1255 void sigqueue_free(struct sigqueue *q)
1256 {
1257 	unsigned long flags;
1258 	spinlock_t *lock = &current->sighand->siglock;
1259 
1260 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1261 	/*
1262 	 * We must hold ->siglock while testing q->list
1263 	 * to serialize with collect_signal() or with
1264 	 * __exit_signal()->flush_sigqueue().
1265 	 */
1266 	spin_lock_irqsave(lock, flags);
1267 	q->flags &= ~SIGQUEUE_PREALLOC;
1268 	/*
1269 	 * If it is queued it will be freed when dequeued,
1270 	 * like the "regular" sigqueue.
1271 	 */
1272 	if (!list_empty(&q->list))
1273 		q = NULL;
1274 	spin_unlock_irqrestore(lock, flags);
1275 
1276 	if (q)
1277 		__sigqueue_free(q);
1278 }
1279 
1280 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1281 {
1282 	int sig = q->info.si_signo;
1283 	struct sigpending *pending;
1284 	unsigned long flags;
1285 	int ret;
1286 
1287 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1288 
1289 	ret = -1;
1290 	if (!likely(lock_task_sighand(t, &flags)))
1291 		goto ret;
1292 
1293 	ret = 1; /* the signal is ignored */
1294 	if (!prepare_signal(sig, t))
1295 		goto out;
1296 
1297 	ret = 0;
1298 	if (unlikely(!list_empty(&q->list))) {
1299 		/*
1300 		 * If an SI_TIMER entry is already queued, just increment
1301 		 * the overrun count.
1302 		 */
1303 		BUG_ON(q->info.si_code != SI_TIMER);
1304 		q->info.si_overrun++;
1305 		goto out;
1306 	}
1307 	q->info.si_overrun = 0;
1308 
1309 	signalfd_notify(t, sig);
1310 	pending = group ? &t->signal->shared_pending : &t->pending;
1311 	list_add_tail(&q->list, &pending->list);
1312 	sigaddset(&pending->signal, sig);
1313 	complete_signal(sig, t, group);
1314 out:
1315 	unlock_task_sighand(t, &flags);
1316 ret:
1317 	return ret;
1318 }
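
/*
 * For illustration, the POSIX timer code uses the preallocation scheme
 * roughly as follows (names here are schematic, not lifted from
 * posix-timers):
 *
 *	q = sigqueue_alloc();			at timer_create() time
 *	...
 *	err = send_sigqueue(q, target, group);	at every expiry
 *
 * The allocation can fail and be reported as EAGAIN from timer_create(),
 * while the per-expiry send never needs memory; if the entry is still
 * queued from an earlier expiry, send_sigqueue() above just bumps
 * si_overrun.
 */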
1319 
1320 /*
1321  * Wake up any threads in the parent blocked in wait* syscalls.
1322  */
1323 static inline void __wake_up_parent(struct task_struct *p,
1324 				    struct task_struct *parent)
1325 {
1326 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1327 }
1328 
1329 /*
1330  * Let a parent know about the death of a child.
1331  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1332  *
1333  * Returns -1 if our parent ignored us and so we've switched to
1334  * self-reaping, or else @sig.
1335  */
1336 int do_notify_parent(struct task_struct *tsk, int sig)
1337 {
1338 	struct siginfo info;
1339 	unsigned long flags;
1340 	struct sighand_struct *psig;
1341 	int ret = sig;
1342 
1343 	BUG_ON(sig == -1);
1344 
1345  	/* do_notify_parent_cldstop should have been called instead.  */
1346  	BUG_ON(task_is_stopped_or_traced(tsk));
1347 
1348 	BUG_ON(!tsk->ptrace &&
1349 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1350 
1351 	info.si_signo = sig;
1352 	info.si_errno = 0;
1353 	/*
1354 	 * We are under tasklist_lock here so our parent is tied to
1355 	 * us and cannot exit and release its namespace.
1356 	 *
1357 	 * The only thing it can do is switch its nsproxy with sys_unshare(),
1358 	 * but unsharing pid namespaces is not allowed, so we'll always
1359 	 * see the relevant namespace.
1360 	 *
1361 	 * write_lock() currently calls preempt_disable() which is the
1362 	 * same as rcu_read_lock(), but according to Oleg it is not
1363 	 * correct to rely on this.
1364 	 */
1365 	rcu_read_lock();
1366 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1367 	rcu_read_unlock();
1368 
1369 	info.si_uid = tsk->uid;
1370 
1371 	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1372 						       tsk->signal->utime));
1373 	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1374 						       tsk->signal->stime));
1375 
1376 	info.si_status = tsk->exit_code & 0x7f;
1377 	if (tsk->exit_code & 0x80)
1378 		info.si_code = CLD_DUMPED;
1379 	else if (tsk->exit_code & 0x7f)
1380 		info.si_code = CLD_KILLED;
1381 	else {
1382 		info.si_code = CLD_EXITED;
1383 		info.si_status = tsk->exit_code >> 8;
1384 	}
1385 
1386 	psig = tsk->parent->sighand;
1387 	spin_lock_irqsave(&psig->siglock, flags);
1388 	if (!tsk->ptrace && sig == SIGCHLD &&
1389 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1390 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1391 		/*
1392 		 * We are exiting and our parent doesn't care.  POSIX.1
1393 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1394 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1395 		 * automatically and not left for our parent's wait4 call.
1396 		 * Rather than having the parent do it as a magic kind of
1397 		 * signal handler, we just set this to tell do_exit that we
1398 		 * can be cleaned up without becoming a zombie.  Note that
1399 		 * we still call __wake_up_parent in this case, because a
1400 		 * blocked sys_wait4 might now return -ECHILD.
1401 		 *
1402 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1403 		 * is implementation-defined: we do (if you don't want
1404 		 * it, just use SIG_IGN instead).
1405 		 */
1406 		ret = tsk->exit_signal = -1;
1407 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1408 			sig = -1;
1409 	}
1410 	if (valid_signal(sig) && sig > 0)
1411 		__group_send_sig_info(sig, &info, tsk->parent);
1412 	__wake_up_parent(tsk, tsk->parent);
1413 	spin_unlock_irqrestore(&psig->siglock, flags);
1414 
1415 	return ret;
1416 }
1417 
1418 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1419 {
1420 	struct siginfo info;
1421 	unsigned long flags;
1422 	struct task_struct *parent;
1423 	struct sighand_struct *sighand;
1424 
1425 	if (tsk->ptrace & PT_PTRACED)
1426 		parent = tsk->parent;
1427 	else {
1428 		tsk = tsk->group_leader;
1429 		parent = tsk->real_parent;
1430 	}
1431 
1432 	info.si_signo = SIGCHLD;
1433 	info.si_errno = 0;
1434 	/*
1435 	 * see comment in do_notify_parent() about the following 3 lines
1436 	 */
1437 	rcu_read_lock();
1438 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1439 	rcu_read_unlock();
1440 
1441 	info.si_uid = tsk->uid;
1442 
1443 	info.si_utime = cputime_to_clock_t(tsk->utime);
1444 	info.si_stime = cputime_to_clock_t(tsk->stime);
1445 
1446  	info.si_code = why;
1447  	switch (why) {
1448  	case CLD_CONTINUED:
1449  		info.si_status = SIGCONT;
1450  		break;
1451  	case CLD_STOPPED:
1452  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1453  		break;
1454  	case CLD_TRAPPED:
1455  		info.si_status = tsk->exit_code & 0x7f;
1456  		break;
1457  	default:
1458  		BUG();
1459  	}
1460 
1461 	sighand = parent->sighand;
1462 	spin_lock_irqsave(&sighand->siglock, flags);
1463 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1464 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1465 		__group_send_sig_info(SIGCHLD, &info, parent);
1466 	/*
1467 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1468 	 */
1469 	__wake_up_parent(tsk, parent);
1470 	spin_unlock_irqrestore(&sighand->siglock, flags);
1471 }
1472 
1473 static inline int may_ptrace_stop(void)
1474 {
1475 	if (!likely(current->ptrace & PT_PTRACED))
1476 		return 0;
1477 	/*
1478 	 * Are we in the middle of do_coredump?
1479 	 * If so, and our tracer is also part of the coredump, stopping
1480 	 * is a deadlock situation and is pointless because our tracer
1481 	 * is dead, so don't allow us to stop.
1482 	 * If SIGKILL was already sent before the caller unlocked
1483 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1484 	 * is safe to enter schedule().
1485 	 */
1486 	if (unlikely(current->mm->core_state) &&
1487 	    unlikely(current->mm == current->parent->mm))
1488 		return 0;
1489 
1490 	return 1;
1491 }
1492 
1493 /*
1494  * Return nonzero if there is a SIGKILL that should be waking us up.
1495  * Called with the siglock held.
1496  */
1497 static int sigkill_pending(struct task_struct *tsk)
1498 {
1499 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1500 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1501 }
1502 
1503 /*
1504  * This must be called with current->sighand->siglock held.
1505  *
1506  * This should be the path for all ptrace stops.
1507  * We always set current->last_siginfo while stopped here.
1508  * That makes it a way to test a stopped process for
1509  * being ptrace-stopped vs being job-control-stopped.
1510  *
1511  * If we actually decide not to stop at all because the tracer
1512  * is gone, we keep current->exit_code unless clear_code.
1513  */
1514 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1515 {
1516 	if (arch_ptrace_stop_needed(exit_code, info)) {
1517 		/*
1518 		 * The arch code has something special to do before a
1519 		 * ptrace stop.  This is allowed to block, e.g. for faults
1520 		 * on user stack pages.  We can't keep the siglock while
1521 		 * calling arch_ptrace_stop, so we must release it now.
1522 		 * To preserve proper semantics, we must do this before
1523 		 * any signal bookkeeping like checking group_stop_count.
1524 		 * Meanwhile, a SIGKILL could come in before we retake the
1525 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1526 		 * So after regaining the lock, we must check for SIGKILL.
1527 		 */
1528 		spin_unlock_irq(&current->sighand->siglock);
1529 		arch_ptrace_stop(exit_code, info);
1530 		spin_lock_irq(&current->sighand->siglock);
1531 		if (sigkill_pending(current))
1532 			return;
1533 	}
1534 
1535 	/*
1536 	 * If there is a group stop in progress,
1537 	 * we must participate in the bookkeeping.
1538 	 */
1539 	if (current->signal->group_stop_count > 0)
1540 		--current->signal->group_stop_count;
1541 
1542 	current->last_siginfo = info;
1543 	current->exit_code = exit_code;
1544 
1545 	/* Let the debugger run.  */
1546 	__set_current_state(TASK_TRACED);
1547 	spin_unlock_irq(&current->sighand->siglock);
1548 	read_lock(&tasklist_lock);
1549 	if (may_ptrace_stop()) {
1550 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1551 		read_unlock(&tasklist_lock);
1552 		schedule();
1553 	} else {
1554 		/*
1555 		 * By the time we got the lock, our tracer went away.
1556 		 * Don't drop the lock yet, another tracer may come.
1557 		 */
1558 		__set_current_state(TASK_RUNNING);
1559 		if (clear_code)
1560 			current->exit_code = 0;
1561 		read_unlock(&tasklist_lock);
1562 	}
1563 
1564 	/*
1565 	 * While in TASK_TRACED, we were considered "frozen enough".
1566 	 * Now that we woke up, it's crucial if we're supposed to be
1567 	 * frozen that we freeze now before running anything substantial.
1568 	 */
1569 	try_to_freeze();
1570 
1571 	/*
1572 	 * We are back.  Now reacquire the siglock before touching
1573 	 * last_siginfo, so that we are sure to have synchronized with
1574 	 * any signal-sending on another CPU that wants to examine it.
1575 	 */
1576 	spin_lock_irq(&current->sighand->siglock);
1577 	current->last_siginfo = NULL;
1578 
1579 	/*
1580 	 * Queued signals ignored us while we were stopped for tracing.
1581 	 * So check for any that we should take before resuming user mode.
1582 	 * This sets TIF_SIGPENDING, but never clears it.
1583 	 */
1584 	recalc_sigpending_tsk(current);
1585 }
1586 
1587 void ptrace_notify(int exit_code)
1588 {
1589 	siginfo_t info;
1590 
1591 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1592 
1593 	memset(&info, 0, sizeof info);
1594 	info.si_signo = SIGTRAP;
1595 	info.si_code = exit_code;
1596 	info.si_pid = task_pid_vnr(current);
1597 	info.si_uid = current->uid;
1598 
1599 	/* Let the debugger run.  */
1600 	spin_lock_irq(&current->sighand->siglock);
1601 	ptrace_stop(exit_code, 1, &info);
1602 	spin_unlock_irq(&current->sighand->siglock);
1603 }
1604 
1605 static void
1606 finish_stop(int stop_count)
1607 {
1608 	/*
1609 	 * If there are no other threads in the group, or if there is
1610 	 * a group stop in progress and we are the last to stop,
1611 	 * report to the parent.  When ptraced, every thread reports itself.
1612 	 */
1613 	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1614 		read_lock(&tasklist_lock);
1615 		do_notify_parent_cldstop(current, CLD_STOPPED);
1616 		read_unlock(&tasklist_lock);
1617 	}
1618 
1619 	do {
1620 		schedule();
1621 	} while (try_to_freeze());
1622 	/*
1623 	 * Now we don't run again until continued.
1624 	 */
1625 	current->exit_code = 0;
1626 }
1627 
1628 /*
1629  * This performs the stopping for SIGSTOP and other stop signals.
1630  * We have to stop all threads in the thread group.
1631  * Returns nonzero if we've actually stopped and released the siglock.
1632  * Returns zero if we didn't stop and still hold the siglock.
1633  */
1634 static int do_signal_stop(int signr)
1635 {
1636 	struct signal_struct *sig = current->signal;
1637 	int stop_count;
1638 
1639 	if (sig->group_stop_count > 0) {
1640 		/*
1641 		 * There is a group stop in progress.  We don't need to
1642 		 * start another one.
1643 		 */
1644 		stop_count = --sig->group_stop_count;
1645 	} else {
1646 		struct task_struct *t;
1647 
1648 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1649 		    unlikely(signal_group_exit(sig)))
1650 			return 0;
1651 		/*
1652 		 * There is no group stop already in progress.
1653 		 * We must initiate one now.
1654 		 */
1655 		sig->group_exit_code = signr;
1656 
1657 		stop_count = 0;
1658 		for (t = next_thread(current); t != current; t = next_thread(t))
1659 			/*
1660 			 * Setting state to TASK_STOPPED for a group
1661 			 * stop is always done with the siglock held,
1662 			 * so this check has no races.
1663 			 */
1664 			if (!(t->flags & PF_EXITING) &&
1665 			    !task_is_stopped_or_traced(t)) {
1666 				stop_count++;
1667 				signal_wake_up(t, 0);
1668 			}
1669 		sig->group_stop_count = stop_count;
1670 	}
1671 
1672 	if (stop_count == 0)
1673 		sig->flags = SIGNAL_STOP_STOPPED;
1674 	current->exit_code = sig->group_exit_code;
1675 	__set_current_state(TASK_STOPPED);
1676 
1677 	spin_unlock_irq(&current->sighand->siglock);
1678 	finish_stop(stop_count);
1679 	return 1;
1680 }
1681 
1682 static int ptrace_signal(int signr, siginfo_t *info,
1683 			 struct pt_regs *regs, void *cookie)
1684 {
1685 	if (!(current->ptrace & PT_PTRACED))
1686 		return signr;
1687 
1688 	ptrace_signal_deliver(regs, cookie);
1689 
1690 	/* Let the debugger run.  */
1691 	ptrace_stop(signr, 0, info);
1692 
1693 	/* We're back.  Did the debugger cancel the sig?  */
1694 	signr = current->exit_code;
1695 	if (signr == 0)
1696 		return signr;
1697 
1698 	current->exit_code = 0;
1699 
1700 	/* Update the siginfo structure if the signal has
1701 	   changed.  If the debugger wanted something
1702 	   specific in the siginfo structure then it should
1703 	   have updated *info via PTRACE_SETSIGINFO.  */
1704 	if (signr != info->si_signo) {
1705 		info->si_signo = signr;
1706 		info->si_errno = 0;
1707 		info->si_code = SI_USER;
1708 		info->si_pid = task_pid_vnr(current->parent);
1709 		info->si_uid = current->parent->uid;
1710 	}
1711 
1712 	/* If the (new) signal is now blocked, requeue it.  */
1713 	if (sigismember(&current->blocked, signr)) {
1714 		specific_send_sig_info(signr, info, current);
1715 		signr = 0;
1716 	}
1717 
1718 	return signr;
1719 }
1720 
1721 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1722 			  struct pt_regs *regs, void *cookie)
1723 {
1724 	struct sighand_struct *sighand = current->sighand;
1725 	struct signal_struct *signal = current->signal;
1726 	int signr;
1727 
1728 relock:
1729 	/*
1730 	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1731 	 * While in TASK_STOPPED, we were considered "frozen enough".
1732 	 * Now that we woke up, it's crucial if we're supposed to be
1733 	 * frozen that we freeze now before running anything substantial.
1734 	 */
1735 	try_to_freeze();
1736 
1737 	spin_lock_irq(&sighand->siglock);
1738 	/*
1739 	 * Every stopped thread goes here after wakeup. Check to see if
1740 	 * we should notify the parent; prepare_signal(SIGCONT) encodes
1741 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1742 	 */
1743 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1744 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1745 				? CLD_CONTINUED : CLD_STOPPED;
1746 		signal->flags &= ~SIGNAL_CLD_MASK;
1747 		spin_unlock_irq(&sighand->siglock);
1748 
1749 		if (unlikely(!tracehook_notify_jctl(1, why)))
1750 			goto relock;
1751 
1752 		read_lock(&tasklist_lock);
1753 		do_notify_parent_cldstop(current->group_leader, why);
1754 		read_unlock(&tasklist_lock);
1755 		goto relock;
1756 	}
1757 
1758 	for (;;) {
1759 		struct k_sigaction *ka;
1760 
1761 		if (unlikely(signal->group_stop_count > 0) &&
1762 		    do_signal_stop(0))
1763 			goto relock;
1764 
1765 		/*
1766 		 * Tracing can induce an artificial signal and choose sigaction.
1767 		 * The return value in @signr determines the default action,
1768 		 * but @info->si_signo is the signal number we will report.
1769 		 */
1770 		signr = tracehook_get_signal(current, regs, info, return_ka);
1771 		if (unlikely(signr < 0))
1772 			goto relock;
1773 		if (unlikely(signr != 0))
1774 			ka = return_ka;
1775 		else {
1776 			signr = dequeue_signal(current, &current->blocked,
1777 					       info);
1778 
1779 			if (!signr)
1780 				break; /* will return 0 */
1781 
1782 			if (signr != SIGKILL) {
1783 				signr = ptrace_signal(signr, info,
1784 						      regs, cookie);
1785 				if (!signr)
1786 					continue;
1787 			}
1788 
1789 			ka = &sighand->action[signr-1];
1790 		}
1791 
1792 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1793 			continue;
1794 		if (ka->sa.sa_handler != SIG_DFL) {
1795 			/* Run the handler.  */
1796 			*return_ka = *ka;
1797 
1798 			if (ka->sa.sa_flags & SA_ONESHOT)
1799 				ka->sa.sa_handler = SIG_DFL;
1800 
1801 			break; /* will return non-zero "signr" value */
1802 		}
1803 
1804 		/*
1805 		 * Now we are doing the default action for this signal.
1806 		 */
1807 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1808 			continue;
1809 
1810 		/*
1811 		 * Global init gets no signals it doesn't want.
1812 		 */
1813 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1814 		    !signal_group_exit(signal))
1815 			continue;
1816 
1817 		if (sig_kernel_stop(signr)) {
1818 			/*
1819 			 * The default action is to stop all threads in
1820 			 * the thread group.  The job control signals
1821 			 * do nothing in an orphaned pgrp, but SIGSTOP
1822 			 * always works.  Note that siglock needs to be
1823 			 * dropped during the call to is_orphaned_pgrp()
1824 			 * because of lock ordering with tasklist_lock.
1825 			 * This allows an intervening SIGCONT to be posted.
1826 			 * We need to check for that and bail out if necessary.
1827 			 */
1828 			if (signr != SIGSTOP) {
1829 				spin_unlock_irq(&sighand->siglock);
1830 
1831 				/* signals can be posted during this window */
1832 
1833 				if (is_current_pgrp_orphaned())
1834 					goto relock;
1835 
1836 				spin_lock_irq(&sighand->siglock);
1837 			}
1838 
1839 			if (likely(do_signal_stop(info->si_signo))) {
1840 				/* It released the siglock.  */
1841 				goto relock;
1842 			}
1843 
1844 			/*
1845 			 * We didn't actually stop, due to a race
1846 			 * with SIGCONT or something like that.
1847 			 */
1848 			continue;
1849 		}
1850 
1851 		spin_unlock_irq(&sighand->siglock);
1852 
1853 		/*
1854 		 * Anything else is fatal, maybe with a core dump.
1855 		 */
1856 		current->flags |= PF_SIGNALED;
1857 
1858 		if (sig_kernel_coredump(signr)) {
1859 			if (print_fatal_signals)
1860 				print_fatal_signal(regs, info->si_signo);
1861 			/*
1862 			 * If it was able to dump core, this kills all
1863 			 * other threads in the group and synchronizes with
1864 			 * their demise.  If we lost the race with another
1865 			 * thread getting here, it set group_exit_code
1866 			 * first and our do_group_exit call below will use
1867 			 * that value and ignore the one we pass it.
1868 			 */
1869 			do_coredump(info->si_signo, info->si_signo, regs);
1870 		}
1871 
1872 		/*
1873 		 * Death signals, no core dump.
1874 		 */
1875 		do_group_exit(info->si_signo);
1876 		/* NOTREACHED */
1877 	}
1878 	spin_unlock_irq(&sighand->siglock);
1879 	return signr;
1880 }
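
/*
 * Illustrative sketch only (not part of this file): an architecture's
 * signal-delivery path typically drives the loop above from its own
 * do_signal().  The handle_signal() helper below is a per-arch function
 * whose exact signature varies between architectures; it appears here
 * only as an assumption to make the flow concrete.
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		siginfo_t info;
 *		struct k_sigaction ka;
 *		int signr;
 *
 *		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *		if (signr > 0) {
 *			handle_signal(signr, &ka, &info, regs);
 *			return;
 *		}
 *		... otherwise restart interrupted syscalls and
 *		    restore the saved signal mask ...
 *	}
 */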
1881 
1882 void exit_signals(struct task_struct *tsk)
1883 {
1884 	int group_stop = 0;
1885 	struct task_struct *t;
1886 
1887 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1888 		tsk->flags |= PF_EXITING;
1889 		return;
1890 	}
1891 
1892 	spin_lock_irq(&tsk->sighand->siglock);
1893 	/*
1894 	 * From now this task is not visible for group-wide signals,
1895 	 * see wants_signal(), do_signal_stop().
1896 	 */
1897 	tsk->flags |= PF_EXITING;
1898 	if (!signal_pending(tsk))
1899 		goto out;
1900 
1901 	/* It could be that __group_complete_signal() chose us to notify
1902 	 * about a group-wide signal.  Another thread should be woken now
1903 	 * to take the signal, since we will not.
1904 	 */
1905 	for (t = tsk; (t = next_thread(t)) != tsk; )
1906 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
1907 			recalc_sigpending_and_wake(t);
1908 
1909 	if (unlikely(tsk->signal->group_stop_count) &&
1910 			!--tsk->signal->group_stop_count) {
1911 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
1912 		group_stop = 1;
1913 	}
1914 out:
1915 	spin_unlock_irq(&tsk->sighand->siglock);
1916 
1917 	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1918 		read_lock(&tasklist_lock);
1919 		do_notify_parent_cldstop(tsk, CLD_STOPPED);
1920 		read_unlock(&tasklist_lock);
1921 	}
1922 }
1923 
1924 EXPORT_SYMBOL(recalc_sigpending);
1925 EXPORT_SYMBOL_GPL(dequeue_signal);
1926 EXPORT_SYMBOL(flush_signals);
1927 EXPORT_SYMBOL(force_sig);
1928 EXPORT_SYMBOL(send_sig);
1929 EXPORT_SYMBOL(send_sig_info);
1930 EXPORT_SYMBOL(sigprocmask);
1931 EXPORT_SYMBOL(block_all_signals);
1932 EXPORT_SYMBOL(unblock_all_signals);
1933 
1934 
1935 /*
1936  * System call entry points.
1937  */
1938 
1939 asmlinkage long sys_restart_syscall(void)
1940 {
1941 	struct restart_block *restart = &current_thread_info()->restart_block;
1942 	return restart->fn(restart);
1943 }
1944 
1945 long do_no_restart_syscall(struct restart_block *param)
1946 {
1947 	return -EINTR;
1948 }
1949 
1950 /*
1951  * We don't need to get the kernel lock - this is all local to this
1952  * particular thread.. (and that's good, because this is _heavily_
1953  * particular thread (and that's good, because this is _heavily_
1954  */
1955 
1956 /*
1957  * This is also useful for kernel threads that want to temporarily
1958  * (or permanently) block certain signals.
1959  *
1960  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1961  * interface happily blocks "unblockable" signals like SIGKILL
1962  * and friends.
1963  */
1964 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1965 {
1966 	int error;
1967 
1968 	spin_lock_irq(&current->sighand->siglock);
1969 	if (oldset)
1970 		*oldset = current->blocked;
1971 
1972 	error = 0;
1973 	switch (how) {
1974 	case SIG_BLOCK:
1975 		sigorsets(&current->blocked, &current->blocked, set);
1976 		break;
1977 	case SIG_UNBLOCK:
1978 		signandsets(&current->blocked, &current->blocked, set);
1979 		break;
1980 	case SIG_SETMASK:
1981 		current->blocked = *set;
1982 		break;
1983 	default:
1984 		error = -EINVAL;
1985 	}
1986 	recalc_sigpending();
1987 	spin_unlock_irq(&current->sighand->siglock);
1988 
1989 	return error;
1990 }
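
/*
 * Minimal in-kernel usage sketch (illustrative only; no caller in this
 * file does exactly this): a kernel thread that wants to block every
 * signal, including the normally unblockable ones mentioned above,
 * could do:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */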
1991 
1992 asmlinkage long
1993 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1994 {
1995 	int error = -EINVAL;
1996 	sigset_t old_set, new_set;
1997 
1998 	/* XXX: Don't preclude handling different sized sigset_t's.  */
1999 	if (sigsetsize != sizeof(sigset_t))
2000 		goto out;
2001 
2002 	if (set) {
2003 		error = -EFAULT;
2004 		if (copy_from_user(&new_set, set, sizeof(*set)))
2005 			goto out;
2006 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2007 
2008 		error = sigprocmask(how, &new_set, &old_set);
2009 		if (error)
2010 			goto out;
2011 		if (oset)
2012 			goto set_old;
2013 	} else if (oset) {
2014 		spin_lock_irq(&current->sighand->siglock);
2015 		old_set = current->blocked;
2016 		spin_unlock_irq(&current->sighand->siglock);
2017 
2018 	set_old:
2019 		error = -EFAULT;
2020 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2021 			goto out;
2022 	}
2023 	error = 0;
2024 out:
2025 	return error;
2026 }
2027 
2028 long do_sigpending(void __user *set, unsigned long sigsetsize)
2029 {
2030 	long error = -EINVAL;
2031 	sigset_t pending;
2032 
2033 	if (sigsetsize > sizeof(sigset_t))
2034 		goto out;
2035 
2036 	spin_lock_irq(&current->sighand->siglock);
2037 	sigorsets(&pending, &current->pending.signal,
2038 		  &current->signal->shared_pending.signal);
2039 	spin_unlock_irq(&current->sighand->siglock);
2040 
2041 	/* Outside the lock because only this thread touches it.  */
2042 	sigandsets(&pending, &current->blocked, &pending);
2043 
2044 	error = -EFAULT;
2045 	if (!copy_to_user(set, &pending, sigsetsize))
2046 		error = 0;
2047 
2048 out:
2049 	return error;
2050 }
2051 
2052 asmlinkage long
2053 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2054 {
2055 	return do_sigpending(set, sigsetsize);
2056 }
2057 
2058 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2059 
2060 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2061 {
2062 	int err;
2063 
2064 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2065 		return -EFAULT;
2066 	if (from->si_code < 0)
2067 		return __copy_to_user(to, from, sizeof(siginfo_t))
2068 			? -EFAULT : 0;
2069 	/*
2070 	 * If you change the siginfo_t structure, please be sure
2071 	 * this code is fixed accordingly.
2072 	 * Please remember to update the signalfd_copyinfo() function
2073 	 * in fs/signalfd.c too, in case siginfo_t changes.
2074 	 * It must never copy any padding contained in the structure
2075 	 * (to avoid leaking kernel data), but it must copy the
2076 	 * generic 3 ints plus the relevant union member.
2077 	 */
2078 	err = __put_user(from->si_signo, &to->si_signo);
2079 	err |= __put_user(from->si_errno, &to->si_errno);
2080 	err |= __put_user((short)from->si_code, &to->si_code);
2081 	switch (from->si_code & __SI_MASK) {
2082 	case __SI_KILL:
2083 		err |= __put_user(from->si_pid, &to->si_pid);
2084 		err |= __put_user(from->si_uid, &to->si_uid);
2085 		break;
2086 	case __SI_TIMER:
2087 		err |= __put_user(from->si_tid, &to->si_tid);
2088 		err |= __put_user(from->si_overrun, &to->si_overrun);
2089 		err |= __put_user(from->si_ptr, &to->si_ptr);
2090 		break;
2091 	case __SI_POLL:
2092 		err |= __put_user(from->si_band, &to->si_band);
2093 		err |= __put_user(from->si_fd, &to->si_fd);
2094 		break;
2095 	case __SI_FAULT:
2096 		err |= __put_user(from->si_addr, &to->si_addr);
2097 #ifdef __ARCH_SI_TRAPNO
2098 		err |= __put_user(from->si_trapno, &to->si_trapno);
2099 #endif
2100 		break;
2101 	case __SI_CHLD:
2102 		err |= __put_user(from->si_pid, &to->si_pid);
2103 		err |= __put_user(from->si_uid, &to->si_uid);
2104 		err |= __put_user(from->si_status, &to->si_status);
2105 		err |= __put_user(from->si_utime, &to->si_utime);
2106 		err |= __put_user(from->si_stime, &to->si_stime);
2107 		break;
2108 	case __SI_RT: /* This is not generated by the kernel as of now. */
2109 	case __SI_MESGQ: /* But this one is. */
2110 		err |= __put_user(from->si_pid, &to->si_pid);
2111 		err |= __put_user(from->si_uid, &to->si_uid);
2112 		err |= __put_user(from->si_ptr, &to->si_ptr);
2113 		break;
2114 	default: /* this is just in case for now ... */
2115 		err |= __put_user(from->si_pid, &to->si_pid);
2116 		err |= __put_user(from->si_uid, &to->si_uid);
2117 		break;
2118 	}
2119 	return err;
2120 }
2121 
2122 #endif
2123 
2124 asmlinkage long
2125 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2126 		    siginfo_t __user *uinfo,
2127 		    const struct timespec __user *uts,
2128 		    size_t sigsetsize)
2129 {
2130 	int ret, sig;
2131 	sigset_t these;
2132 	struct timespec ts;
2133 	siginfo_t info;
2134 	long timeout = 0;
2135 
2136 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2137 	if (sigsetsize != sizeof(sigset_t))
2138 		return -EINVAL;
2139 
2140 	if (copy_from_user(&these, uthese, sizeof(these)))
2141 		return -EFAULT;
2142 
2143 	/*
2144 	 * Invert the set of allowed signals to get those we
2145 	 * want to block.
2146 	 */
2147 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2148 	signotset(&these);
2149 
2150 	if (uts) {
2151 		if (copy_from_user(&ts, uts, sizeof(ts)))
2152 			return -EFAULT;
2153 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2154 		    || ts.tv_sec < 0)
2155 			return -EINVAL;
2156 	}
2157 
2158 	spin_lock_irq(&current->sighand->siglock);
2159 	sig = dequeue_signal(current, &these, &info);
2160 	if (!sig) {
2161 		timeout = MAX_SCHEDULE_TIMEOUT;
2162 		if (uts)
2163 			timeout = (timespec_to_jiffies(&ts)
2164 				   + (ts.tv_sec || ts.tv_nsec));
2165 
2166 		if (timeout) {
2167 			/* None ready -- temporarily unblock those we're
2168 			 * interested in while we are sleeping, so that we'll
2169 			 * be awakened when they arrive.  */
2170 			current->real_blocked = current->blocked;
2171 			sigandsets(&current->blocked, &current->blocked, &these);
2172 			recalc_sigpending();
2173 			spin_unlock_irq(&current->sighand->siglock);
2174 
2175 			timeout = schedule_timeout_interruptible(timeout);
2176 
2177 			spin_lock_irq(&current->sighand->siglock);
2178 			sig = dequeue_signal(current, &these, &info);
2179 			current->blocked = current->real_blocked;
2180 			siginitset(&current->real_blocked, 0);
2181 			recalc_sigpending();
2182 		}
2183 	}
2184 	spin_unlock_irq(&current->sighand->siglock);
2185 
2186 	if (sig) {
2187 		ret = sig;
2188 		if (uinfo) {
2189 			if (copy_siginfo_to_user(uinfo, &info))
2190 				ret = -EFAULT;
2191 		}
2192 	} else {
2193 		ret = -EAGAIN;
2194 		if (timeout)
2195 			ret = -EINTR;
2196 	}
2197 
2198 	return ret;
2199 }
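
/*
 * Userspace view (illustrative sketch, via the glibc sigtimedwait()
 * wrapper rather than the raw syscall, and assuming <errno.h> and
 * <signal.h> are included): the signal must already be blocked,
 * otherwise it may be delivered to a handler instead of being picked
 * up by dequeue_signal() above.
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		... the five second timeout expired ...
 */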
2200 
2201 asmlinkage long
2202 sys_kill(pid_t pid, int sig)
2203 {
2204 	struct siginfo info;
2205 
2206 	info.si_signo = sig;
2207 	info.si_errno = 0;
2208 	info.si_code = SI_USER;
2209 	info.si_pid = task_tgid_vnr(current);
2210 	info.si_uid = current->uid;
2211 
2212 	return kill_something_info(sig, &info, pid);
2213 }
2214 
2215 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2216 {
2217 	int error;
2218 	struct siginfo info;
2219 	struct task_struct *p;
2220 	unsigned long flags;
2221 
2222 	error = -ESRCH;
2223 	info.si_signo = sig;
2224 	info.si_errno = 0;
2225 	info.si_code = SI_TKILL;
2226 	info.si_pid = task_tgid_vnr(current);
2227 	info.si_uid = current->uid;
2228 
2229 	rcu_read_lock();
2230 	p = find_task_by_vpid(pid);
2231 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2232 		error = check_kill_permission(sig, &info, p);
2233 		/*
2234 		 * The null signal is a permissions and process existence
2235 		 * probe.  No signal is actually delivered.
2236 		 *
2237 		 * If lock_task_sighand() fails we pretend the task dies
2238 		 * after receiving the signal. The window is tiny, and the
2239 		 * signal is private anyway.
2240 		 */
2241 		if (!error && sig && lock_task_sighand(p, &flags)) {
2242 			error = specific_send_sig_info(sig, &info, p);
2243 			unlock_task_sighand(p, &flags);
2244 		}
2245 	}
2246 	rcu_read_unlock();
2247 
2248 	return error;
2249 }
2250 
2251 /**
2252  *  sys_tgkill - send signal to one specific thread
2253  *  @tgid: the thread group ID of the thread
2254  *  @pid: the PID of the thread
2255  *  @sig: signal to be sent
2256  *
2257  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2258  *  exists but no longer belongs to the target process.  This method
2259  *  solves the problem of threads exiting and PIDs getting reused.
2260  */
2261 asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2262 {
2263 	/* This is only valid for single tasks */
2264 	if (pid <= 0 || tgid <= 0)
2265 		return -EINVAL;
2266 
2267 	return do_tkill(tgid, pid, sig);
2268 }
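
/*
 * Userspace view (illustrative sketch): glibc of this era has no
 * dedicated wrapper, so callers typically use syscall(2) directly.
 * The tgid/tid values here are assumptions made for the example.
 *
 *	#include <sys/syscall.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = some_thread_tid;	(hypothetical, e.g. from gettid())
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */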
2269 
2270 /*
2271  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2272  */
2273 asmlinkage long
2274 sys_tkill(pid_t pid, int sig)
2275 {
2276 	/* This is only valid for single tasks */
2277 	if (pid <= 0)
2278 		return -EINVAL;
2279 
2280 	return do_tkill(0, pid, sig);
2281 }
2282 
2283 asmlinkage long
2284 sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
2285 {
2286 	siginfo_t info;
2287 
2288 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2289 		return -EFAULT;
2290 
2291 	/* Not even root can pretend to send signals from the kernel.
2292 	   Nor can they impersonate a kill(), which adds source info.  */
2293 	if (info.si_code >= 0)
2294 		return -EPERM;
2295 	info.si_signo = sig;
2296 
2297 	/* POSIX.1b doesn't mention process groups.  */
2298 	return kill_proc_info(sig, &info, pid);
2299 }
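
/*
 * Userspace view (illustrative sketch): the glibc sigqueue() wrapper is
 * the usual caller; it fills in si_code = SI_QUEUE (a negative value),
 * which is why the si_code >= 0 check above does not reject it.  The
 * pid below is assumed to be the target process ID.
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, v);
 */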
2300 
2301 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2302 {
2303 	struct task_struct *t = current;
2304 	struct k_sigaction *k;
2305 	sigset_t mask;
2306 
2307 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2308 		return -EINVAL;
2309 
2310 	k = &t->sighand->action[sig-1];
2311 
2312 	spin_lock_irq(&current->sighand->siglock);
2313 	if (oact)
2314 		*oact = *k;
2315 
2316 	if (act) {
2317 		sigdelsetmask(&act->sa.sa_mask,
2318 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2319 		*k = *act;
2320 		/*
2321 		 * POSIX 3.3.1.3:
2322 		 *  "Setting a signal action to SIG_IGN for a signal that is
2323 		 *   pending shall cause the pending signal to be discarded,
2324 		 *   whether or not it is blocked."
2325 		 *
2326 		 *  "Setting a signal action to SIG_DFL for a signal that is
2327 		 *   pending and whose default action is to ignore the signal
2328 		 *   (for example, SIGCHLD), shall cause the pending signal to
2329 		 *   be discarded, whether or not it is blocked"
2330 		 */
2331 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2332 			sigemptyset(&mask);
2333 			sigaddset(&mask, sig);
2334 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2335 			do {
2336 				rm_from_queue_full(&mask, &t->pending);
2337 				t = next_thread(t);
2338 			} while (t != current);
2339 		}
2340 	}
2341 
2342 	spin_unlock_irq(&current->sighand->siglock);
2343 	return 0;
2344 }
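
/*
 * Userspace consequence of the POSIX rule quoted above (illustrative
 * sketch only): a pending, blocked signal is silently discarded when
 * its action is reset to SIG_IGN.
 *
 *	sigset_t s;
 *
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	raise(SIGUSR1);				now pending and blocked
 *	signal(SIGUSR1, SIG_IGN);		pending SIGUSR1 is discarded
 *	sigprocmask(SIG_UNBLOCK, &s, NULL);	nothing is delivered
 */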
2345 
2346 int
2347 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2348 {
2349 	stack_t oss;
2350 	int error;
2351 
2352 	if (uoss) {
2353 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2354 		oss.ss_size = current->sas_ss_size;
2355 		oss.ss_flags = sas_ss_flags(sp);
2356 	}
2357 
2358 	if (uss) {
2359 		void __user *ss_sp;
2360 		size_t ss_size;
2361 		int ss_flags;
2362 
2363 		error = -EFAULT;
2364 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2365 		    || __get_user(ss_sp, &uss->ss_sp)
2366 		    || __get_user(ss_flags, &uss->ss_flags)
2367 		    || __get_user(ss_size, &uss->ss_size))
2368 			goto out;
2369 
2370 		error = -EPERM;
2371 		if (on_sig_stack(sp))
2372 			goto out;
2373 
2374 		error = -EINVAL;
2375 		/*
2376 		 * Note - this code used to test ss_flags incorrectly:
2377 		 * old code may have been written using ss_flags==0
2378 		 * to mean ss_flags==SS_ONSTACK (as this was the only
2379 		 * way that worked), so this fix preserves that older
2380 		 * mechanism.
2381 		 */
2383 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2384 			goto out;
2385 
2386 		if (ss_flags == SS_DISABLE) {
2387 			ss_size = 0;
2388 			ss_sp = NULL;
2389 		} else {
2390 			error = -ENOMEM;
2391 			if (ss_size < MINSIGSTKSZ)
2392 				goto out;
2393 		}
2394 
2395 		current->sas_ss_sp = (unsigned long) ss_sp;
2396 		current->sas_ss_size = ss_size;
2397 	}
2398 
2399 	if (uoss) {
2400 		error = -EFAULT;
2401 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2402 			goto out;
2403 	}
2404 
2405 	error = 0;
2406 out:
2407 	return error;
2408 }
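
/*
 * Userspace view (illustrative sketch): installing an alternate stack
 * and a handler that runs on it.  SIGSTKSZ and SA_ONSTACK come from
 * <signal.h>; segv_handler is a hypothetical handler function.
 *
 *	stack_t ss = {
 *		.ss_sp		= malloc(SIGSTKSZ),
 *		.ss_size	= SIGSTKSZ,
 *		.ss_flags	= 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler	= segv_handler,
 *		.sa_flags	= SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */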
2409 
2410 #ifdef __ARCH_WANT_SYS_SIGPENDING
2411 
2412 asmlinkage long
2413 sys_sigpending(old_sigset_t __user *set)
2414 {
2415 	return do_sigpending(set, sizeof(*set));
2416 }
2417 
2418 #endif
2419 
2420 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2421 /* Some platforms have their own version with special arguments;
2422    others support only sys_rt_sigprocmask.  */
2423 
2424 asmlinkage long
2425 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2426 {
2427 	int error;
2428 	old_sigset_t old_set, new_set;
2429 
2430 	if (set) {
2431 		error = -EFAULT;
2432 		if (copy_from_user(&new_set, set, sizeof(*set)))
2433 			goto out;
2434 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2435 
2436 		spin_lock_irq(&current->sighand->siglock);
2437 		old_set = current->blocked.sig[0];
2438 
2439 		error = 0;
2440 		switch (how) {
2441 		default:
2442 			error = -EINVAL;
2443 			break;
2444 		case SIG_BLOCK:
2445 			sigaddsetmask(&current->blocked, new_set);
2446 			break;
2447 		case SIG_UNBLOCK:
2448 			sigdelsetmask(&current->blocked, new_set);
2449 			break;
2450 		case SIG_SETMASK:
2451 			current->blocked.sig[0] = new_set;
2452 			break;
2453 		}
2454 
2455 		recalc_sigpending();
2456 		spin_unlock_irq(&current->sighand->siglock);
2457 		if (error)
2458 			goto out;
2459 		if (oset)
2460 			goto set_old;
2461 	} else if (oset) {
2462 		old_set = current->blocked.sig[0];
2463 	set_old:
2464 		error = -EFAULT;
2465 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2466 			goto out;
2467 	}
2468 	error = 0;
2469 out:
2470 	return error;
2471 }
2472 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2473 
2474 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2475 asmlinkage long
2476 sys_rt_sigaction(int sig,
2477 		 const struct sigaction __user *act,
2478 		 struct sigaction __user *oact,
2479 		 size_t sigsetsize)
2480 {
2481 	struct k_sigaction new_sa, old_sa;
2482 	int ret = -EINVAL;
2483 
2484 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2485 	if (sigsetsize != sizeof(sigset_t))
2486 		goto out;
2487 
2488 	if (act) {
2489 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2490 			return -EFAULT;
2491 	}
2492 
2493 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2494 
2495 	if (!ret && oact) {
2496 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2497 			return -EFAULT;
2498 	}
2499 out:
2500 	return ret;
2501 }
2502 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2503 
2504 #ifdef __ARCH_WANT_SYS_SGETMASK
2505 
2506 /*
2507  * For backwards compatibility.  Functionality superseded by sigprocmask.
2508  */
2509 asmlinkage long
2510 sys_sgetmask(void)
2511 {
2512 	/* SMP safe */
2513 	return current->blocked.sig[0];
2514 }
2515 
2516 asmlinkage long
2517 sys_ssetmask(int newmask)
2518 {
2519 	int old;
2520 
2521 	spin_lock_irq(&current->sighand->siglock);
2522 	old = current->blocked.sig[0];
2523 
2524 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2525 						  sigmask(SIGSTOP)));
2526 	recalc_sigpending();
2527 	spin_unlock_irq(&current->sighand->siglock);
2528 
2529 	return old;
2530 }
2531 #endif /* __ARCH_WANT_SYS_SGETMASK */
2532 
2533 #ifdef __ARCH_WANT_SYS_SIGNAL
2534 /*
2535  * For backwards compatibility.  Functionality superseded by sigaction.
2536  */
2537 asmlinkage unsigned long
2538 sys_signal(int sig, __sighandler_t handler)
2539 {
2540 	struct k_sigaction new_sa, old_sa;
2541 	int ret;
2542 
2543 	new_sa.sa.sa_handler = handler;
2544 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2545 	sigemptyset(&new_sa.sa.sa_mask);
2546 
2547 	ret = do_sigaction(sig, &new_sa, &old_sa);
2548 
2549 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2550 }
2551 #endif /* __ARCH_WANT_SYS_SIGNAL */
2552 
2553 #ifdef __ARCH_WANT_SYS_PAUSE
2554 
2555 asmlinkage long
2556 sys_pause(void)
2557 {
2558 	current->state = TASK_INTERRUPTIBLE;
2559 	schedule();
2560 	return -ERESTARTNOHAND;
2561 }
2562 
2563 #endif
2564 
2565 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2566 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2567 {
2568 	sigset_t newset;
2569 
2570 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2571 	if (sigsetsize != sizeof(sigset_t))
2572 		return -EINVAL;
2573 
2574 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2575 		return -EFAULT;
2576 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2577 
2578 	spin_lock_irq(&current->sighand->siglock);
2579 	current->saved_sigmask = current->blocked;
2580 	current->blocked = newset;
2581 	recalc_sigpending();
2582 	spin_unlock_irq(&current->sighand->siglock);
2583 
2584 	current->state = TASK_INTERRUPTIBLE;
2585 	schedule();
2586 	set_restore_sigmask();
2587 	return -ERESTARTNOHAND;
2588 }
2589 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
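
/*
 * Userspace view (illustrative sketch): the classic race-free wait that
 * rt_sigsuspend() exists to support.  The mask is restored atomically
 * on return, so a SIGCHLD arriving between the check of the (assumed)
 * child_exited flag and the call to sigsuspend() cannot be lost.
 *
 *	sigset_t block, orig;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &orig);
 *	while (!child_exited)		(flag set by the SIGCHLD handler)
 *		sigsuspend(&orig);	(atomically unblock and sleep)
 *	sigprocmask(SIG_SETMASK, &orig, NULL);
 */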
2590 
2591 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2592 {
2593 	return NULL;
2594 }
2595 
2596 void __init signals_init(void)
2597 {
2598 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2599 }
2600