xref: /openbmc/linux/kernel/signal.c (revision 9ac8d3fb)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/tracehook.h>
26 #include <linux/capability.h>
27 #include <linux/freezer.h>
28 #include <linux/pid_namespace.h>
29 #include <linux/nsproxy.h>
30 #include <trace/sched.h>
31 
32 #include <asm/param.h>
33 #include <asm/uaccess.h>
34 #include <asm/unistd.h>
35 #include <asm/siginfo.h>
36 #include "audit.h"	/* audit_signal_info() */
37 
38 /*
39  * SLAB caches for signal bits.
40  */
41 
42 static struct kmem_cache *sigqueue_cachep;
43 
44 static void __user *sig_handler(struct task_struct *t, int sig)
45 {
46 	return t->sighand->action[sig - 1].sa.sa_handler;
47 }
48 
49 static int sig_handler_ignored(void __user *handler, int sig)
50 {
51 	/* Is it explicitly or implicitly ignored? */
52 	return handler == SIG_IGN ||
53 		(handler == SIG_DFL && sig_kernel_ignore(sig));
54 }
55 
56 static int sig_ignored(struct task_struct *t, int sig)
57 {
58 	void __user *handler;
59 
60 	/*
61 	 * Blocked signals are never ignored, since the
62 	 * signal handler may change by the time it is
63 	 * unblocked.
64 	 */
65 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
66 		return 0;
67 
68 	handler = sig_handler(t, sig);
69 	if (!sig_handler_ignored(handler, sig))
70 		return 0;
71 
72 	/*
73 	 * Tracers may want to know about even ignored signals.
74 	 */
75 	return !tracehook_consider_ignored_signal(t, sig, handler);
76 }
77 
78 /*
79  * Re-calculate pending state from the set of locally pending
80  * signals, globally pending signals, and blocked signals.
81  */
82 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
83 {
84 	unsigned long ready;
85 	long i;
86 
87 	switch (_NSIG_WORDS) {
88 	default:
89 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
90 			ready |= signal->sig[i] &~ blocked->sig[i];
91 		break;
92 
93 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
94 		ready |= signal->sig[2] &~ blocked->sig[2];
95 		ready |= signal->sig[1] &~ blocked->sig[1];
96 		ready |= signal->sig[0] &~ blocked->sig[0];
97 		break;
98 
99 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
100 		ready |= signal->sig[0] &~ blocked->sig[0];
101 		break;
102 
103 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
104 	}
105 	return ready != 0;
106 }
107 
108 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
109 
110 static int recalc_sigpending_tsk(struct task_struct *t)
111 {
112 	if (t->signal->group_stop_count > 0 ||
113 	    PENDING(&t->pending, &t->blocked) ||
114 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
115 		set_tsk_thread_flag(t, TIF_SIGPENDING);
116 		return 1;
117 	}
118 	/*
119 	 * We must never clear the flag in another thread, or in current
120 	 * when it's possible the current syscall is returning -ERESTART*.
121 	 * So we don't clear it here; only callers who know they should clear it do so.
122 	 */
123 	return 0;
124 }
125 
126 /*
127  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
128  * This is superfluous when called on current; the wakeup is a harmless no-op.
129  */
130 void recalc_sigpending_and_wake(struct task_struct *t)
131 {
132 	if (recalc_sigpending_tsk(t))
133 		signal_wake_up(t, 0);
134 }
135 
136 void recalc_sigpending(void)
137 {
138 	if (unlikely(tracehook_force_sigpending()))
139 		set_thread_flag(TIF_SIGPENDING);
140 	else if (!recalc_sigpending_tsk(current) && !freezing(current))
141 		clear_thread_flag(TIF_SIGPENDING);
142 
143 }
144 
145 /* Given the mask, find the first available signal that should be serviced. */
146 
147 int next_signal(struct sigpending *pending, sigset_t *mask)
148 {
149 	unsigned long i, *s, *m, x;
150 	int sig = 0;
151 
152 	s = pending->signal.sig;
153 	m = mask->sig;
154 	switch (_NSIG_WORDS) {
155 	default:
156 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
157 			if ((x = *s &~ *m) != 0) {
158 				sig = ffz(~x) + i*_NSIG_BPW + 1;
159 				break;
160 			}
161 		break;
162 
163 	case 2: if ((x = s[0] &~ m[0]) != 0)
164 			sig = 1;
165 		else if ((x = s[1] &~ m[1]) != 0)
166 			sig = _NSIG_BPW + 1;
167 		else
168 			break;
169 		sig += ffz(~x);
170 		break;
171 
172 	case 1: if ((x = *s &~ *m) != 0)
173 			sig = ffz(~x) + 1;
174 		break;
175 	}
176 
177 	return sig;
178 }
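
/*
 * Worked example (added for illustration, not part of the original file):
 * with 64-bit words, a pending set holding SIGINT (2) and SIGTERM (15) and
 * a mask blocking SIGINT leaves x with only bit 14 set, since signal n
 * occupies bit n-1.  ffz(~x) then returns 14, the index of the lowest set
 * bit of x, so next_signal() reports 14 + 1 = 15, i.e. SIGTERM.
 */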
179 
180 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
181 					 int override_rlimit)
182 {
183 	struct sigqueue *q = NULL;
184 	struct user_struct *user;
185 
186 	/*
187 	 * In order to avoid problems with "switch_user()", we want to make
188 	 * sure that the compiler doesn't re-load "t->user"
189 	 */
190 	user = t->user;
191 	barrier();
192 	atomic_inc(&user->sigpending);
193 	if (override_rlimit ||
194 	    atomic_read(&user->sigpending) <=
195 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
196 		q = kmem_cache_alloc(sigqueue_cachep, flags);
197 	if (unlikely(q == NULL)) {
198 		atomic_dec(&user->sigpending);
199 	} else {
200 		INIT_LIST_HEAD(&q->list);
201 		q->flags = 0;
202 		q->user = get_uid(user);
203 	}
204 	return q;
205 }
206 
207 static void __sigqueue_free(struct sigqueue *q)
208 {
209 	if (q->flags & SIGQUEUE_PREALLOC)
210 		return;
211 	atomic_dec(&q->user->sigpending);
212 	free_uid(q->user);
213 	kmem_cache_free(sigqueue_cachep, q);
214 }
215 
216 void flush_sigqueue(struct sigpending *queue)
217 {
218 	struct sigqueue *q;
219 
220 	sigemptyset(&queue->signal);
221 	while (!list_empty(&queue->list)) {
222 		q = list_entry(queue->list.next, struct sigqueue , list);
223 		list_del_init(&q->list);
224 		__sigqueue_free(q);
225 	}
226 }
227 
228 /*
229  * Flush all pending signals for a task.
230  */
231 void flush_signals(struct task_struct *t)
232 {
233 	unsigned long flags;
234 
235 	spin_lock_irqsave(&t->sighand->siglock, flags);
236 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
237 	flush_sigqueue(&t->pending);
238 	flush_sigqueue(&t->signal->shared_pending);
239 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
240 }
241 
242 static void __flush_itimer_signals(struct sigpending *pending)
243 {
244 	sigset_t signal, retain;
245 	struct sigqueue *q, *n;
246 
247 	signal = pending->signal;
248 	sigemptyset(&retain);
249 
250 	list_for_each_entry_safe(q, n, &pending->list, list) {
251 		int sig = q->info.si_signo;
252 
253 		if (likely(q->info.si_code != SI_TIMER)) {
254 			sigaddset(&retain, sig);
255 		} else {
256 			sigdelset(&signal, sig);
257 			list_del_init(&q->list);
258 			__sigqueue_free(q);
259 		}
260 	}
261 
262 	sigorsets(&pending->signal, &signal, &retain);
263 }
264 
265 void flush_itimer_signals(void)
266 {
267 	struct task_struct *tsk = current;
268 	unsigned long flags;
269 
270 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
271 	__flush_itimer_signals(&tsk->pending);
272 	__flush_itimer_signals(&tsk->signal->shared_pending);
273 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
274 }
275 
276 void ignore_signals(struct task_struct *t)
277 {
278 	int i;
279 
280 	for (i = 0; i < _NSIG; ++i)
281 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
282 
283 	flush_signals(t);
284 }
285 
286 /*
287  * Flush all handlers for a task.
288  */
289 
290 void
291 flush_signal_handlers(struct task_struct *t, int force_default)
292 {
293 	int i;
294 	struct k_sigaction *ka = &t->sighand->action[0];
295 	for (i = _NSIG ; i != 0 ; i--) {
296 		if (force_default || ka->sa.sa_handler != SIG_IGN)
297 			ka->sa.sa_handler = SIG_DFL;
298 		ka->sa.sa_flags = 0;
299 		sigemptyset(&ka->sa.sa_mask);
300 		ka++;
301 	}
302 }
303 
304 int unhandled_signal(struct task_struct *tsk, int sig)
305 {
306 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
307 	if (is_global_init(tsk))
308 		return 1;
309 	if (handler != SIG_IGN && handler != SIG_DFL)
310 		return 0;
311 	return !tracehook_consider_fatal_signal(tsk, sig, handler);
312 }
313 
314 
315 /* Notify the system that a driver wants to block all signals for this
316  * process, and wants to be notified if any signals at all were to be
317  * sent/acted upon.  If the notifier routine returns non-zero, then the
318  * signal will be acted upon after all.  If the notifier routine returns 0,
319  * then the signal will be blocked.  Only one block per process is
320  * allowed.  priv is a pointer to private data that the notifier routine
321  * can use to determine if the signal should be blocked or not.  */
322 
323 void
324 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
325 {
326 	unsigned long flags;
327 
328 	spin_lock_irqsave(&current->sighand->siglock, flags);
329 	current->notifier_mask = mask;
330 	current->notifier_data = priv;
331 	current->notifier = notifier;
332 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
333 }
334 
335 /* Notify the system that blocking has ended. */
336 
337 void
338 unblock_all_signals(void)
339 {
340 	unsigned long flags;
341 
342 	spin_lock_irqsave(&current->sighand->siglock, flags);
343 	current->notifier = NULL;
344 	current->notifier_data = NULL;
345 	recalc_sigpending();
346 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
347 }
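
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants to suppress most signal delivery around a critical section might
 * do something like the following, where my_dev, my_notifier and
 * allow_signals are made-up names:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->allow_signals;	(non-zero: deliver after all)
 *	}
 *
 *	sigset_t mask;
 *
 *	siginitset(&mask, sigmask(SIGINT) | sigmask(SIGQUIT));
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 */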
348 
349 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
350 {
351 	struct sigqueue *q, *first = NULL;
352 
353 	/*
354 	 * Collect the siginfo appropriate to this signal.  Check if
355 	 * there is another siginfo for the same signal.
356 	*/
357 	list_for_each_entry(q, &list->list, list) {
358 		if (q->info.si_signo == sig) {
359 			if (first)
360 				goto still_pending;
361 			first = q;
362 		}
363 	}
364 
365 	sigdelset(&list->signal, sig);
366 
367 	if (first) {
368 still_pending:
369 		list_del_init(&first->list);
370 		copy_siginfo(info, &first->info);
371 		__sigqueue_free(first);
372 	} else {
373 		/* Ok, it wasn't in the queue.  This must be
374 		   a fast-pathed signal or we must have been
375 		   out of queue space.  So zero out the info.
376 		 */
377 		info->si_signo = sig;
378 		info->si_errno = 0;
379 		info->si_code = 0;
380 		info->si_pid = 0;
381 		info->si_uid = 0;
382 	}
383 }
384 
385 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
386 			siginfo_t *info)
387 {
388 	int sig = next_signal(pending, mask);
389 
390 	if (sig) {
391 		if (current->notifier) {
392 			if (sigismember(current->notifier_mask, sig)) {
393 				if (!(current->notifier)(current->notifier_data)) {
394 					clear_thread_flag(TIF_SIGPENDING);
395 					return 0;
396 				}
397 			}
398 		}
399 
400 		collect_signal(sig, pending, info);
401 	}
402 
403 	return sig;
404 }
405 
406 /*
407  * Dequeue a signal and return the element to the caller, which is
408  * expected to free it.
409  *
410  * All callers have to hold the siglock.
411  */
412 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
413 {
414 	int signr;
415 
416 	/* We only dequeue private signals from ourselves, we don't let
417 	 * signalfd steal them
418 	 */
419 	signr = __dequeue_signal(&tsk->pending, mask, info);
420 	if (!signr) {
421 		signr = __dequeue_signal(&tsk->signal->shared_pending,
422 					 mask, info);
423 		/*
424 		 * itimer signal ?
425 		 *
426 		 * itimers are process shared and we restart periodic
427 		 * itimers in the signal delivery path to prevent DoS
428 		 * attacks in the high resolution timer case. This is
429 		 * compliant with the old way of self restarting
430 		 * itimers, as the SIGALRM is a legacy signal and only
431 		 * queued once. Changing the restart behaviour to
432 		 * restart the timer in the signal dequeue path also
433 		 * reduces the timer noise on heavily loaded !highres
434 		 * systems.
435 		 */
436 		if (unlikely(signr == SIGALRM)) {
437 			struct hrtimer *tmr = &tsk->signal->real_timer;
438 
439 			if (!hrtimer_is_queued(tmr) &&
440 			    tsk->signal->it_real_incr.tv64 != 0) {
441 				hrtimer_forward(tmr, tmr->base->get_time(),
442 						tsk->signal->it_real_incr);
443 				hrtimer_restart(tmr);
444 			}
445 		}
446 	}
447 
448 	recalc_sigpending();
449 	if (!signr)
450 		return 0;
451 
452 	if (unlikely(sig_kernel_stop(signr))) {
453 		/*
454 		 * Set a marker that we have dequeued a stop signal.  Our
455 		 * caller might release the siglock and then the pending
456 		 * stop signal it is about to process is no longer in the
457 		 * pending bitmasks, but must still be cleared by a SIGCONT
458 		 * (and overruled by a SIGKILL).  So those cases clear this
459 		 * shared flag after we've set it.  Note that this flag may
460 		 * remain set after the signal we return is ignored or
461 		 * handled.  That doesn't matter because its only purpose
462 		 * is to alert stop-signal processing code when another
463 		 * processor has come along and cleared the flag.
464 		 */
465 		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
466 	}
467 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
468 		/*
469 		 * Release the siglock to ensure proper locking order
470 		 * of timer locks outside of siglocks.  Note, we leave
471 		 * irqs disabled here, since the posix-timers code is
472 		 * about to disable them again anyway.
473 		 */
474 		spin_unlock(&tsk->sighand->siglock);
475 		do_schedule_next_timer(info);
476 		spin_lock(&tsk->sighand->siglock);
477 	}
478 	return signr;
479 }
480 
481 /*
482  * Tell a process that it has a new active signal.
483  *
484  * NOTE! We rely on the previous spin_lock to
485  * lock interrupts for us! We can only be called with
486  * "siglock" held, and local interrupts must
487  * have been disabled when that got acquired!
488  *
489  * No need to set need_resched since signal event passing
490  * goes through ->blocked
491  */
492 void signal_wake_up(struct task_struct *t, int resume)
493 {
494 	unsigned int mask;
495 
496 	set_tsk_thread_flag(t, TIF_SIGPENDING);
497 
498 	/*
499 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
500 	 * case. We don't check t->state here because there is a race with it
501 	 * executing on another processor and just now entering stopped state.
502 	 * By using wake_up_state, we ensure the process will wake up and
503 	 * handle its death signal.
504 	 */
505 	mask = TASK_INTERRUPTIBLE;
506 	if (resume)
507 		mask |= TASK_WAKEKILL;
508 	if (!wake_up_state(t, mask))
509 		kick_process(t);
510 }
511 
512 /*
513  * Remove signals in mask from the pending set and queue.
514  * Returns 1 if any signals were found.
515  *
516  * All callers must be holding the siglock.
517  *
518  * This version takes a sigset mask and looks at all signals,
519  * not just those in the first mask word.
520  */
521 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
522 {
523 	struct sigqueue *q, *n;
524 	sigset_t m;
525 
526 	sigandsets(&m, mask, &s->signal);
527 	if (sigisemptyset(&m))
528 		return 0;
529 
530 	signandsets(&s->signal, &s->signal, mask);
531 	list_for_each_entry_safe(q, n, &s->list, list) {
532 		if (sigismember(mask, q->info.si_signo)) {
533 			list_del_init(&q->list);
534 			__sigqueue_free(q);
535 		}
536 	}
537 	return 1;
538 }
539 /*
540  * Remove signals in mask from the pending set and queue.
541  * Returns 1 if any signals were found.
542  *
543  * All callers must be holding the siglock.
544  */
545 static int rm_from_queue(unsigned long mask, struct sigpending *s)
546 {
547 	struct sigqueue *q, *n;
548 
549 	if (!sigtestsetmask(&s->signal, mask))
550 		return 0;
551 
552 	sigdelsetmask(&s->signal, mask);
553 	list_for_each_entry_safe(q, n, &s->list, list) {
554 		if (q->info.si_signo < SIGRTMIN &&
555 		    (mask & sigmask(q->info.si_signo))) {
556 			list_del_init(&q->list);
557 			__sigqueue_free(q);
558 		}
559 	}
560 	return 1;
561 }
562 
563 /*
564  * Bad permissions for sending the signal
565  */
566 static int check_kill_permission(int sig, struct siginfo *info,
567 				 struct task_struct *t)
568 {
569 	struct pid *sid;
570 	int error;
571 
572 	if (!valid_signal(sig))
573 		return -EINVAL;
574 
575 	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
576 		return 0;
577 
578 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
579 	if (error)
580 		return error;
581 
582 	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
583 	    (current->uid  ^ t->suid) && (current->uid  ^ t->uid) &&
584 	    !capable(CAP_KILL)) {
585 		switch (sig) {
586 		case SIGCONT:
587 			sid = task_session(t);
588 			/*
589 			 * We don't return the error if sid == NULL. The
590 			 * task was unhashed, the caller must notice this.
591 			 */
592 			if (!sid || sid == task_session(current))
593 				break;
594 		default:
595 			return -EPERM;
596 		}
597 	}
598 
599 	return security_task_kill(t, info, sig, 0);
600 }
601 
602 /*
603  * Handle magic process-wide effects of stop/continue signals. Unlike
604  * the signal actions, these happen immediately at signal-generation
605  * time regardless of blocking, ignoring, or handling.  This does the
606  * actual continuing for SIGCONT, but not the actual stopping for stop
607  * signals. The process stop is done as a signal action for SIG_DFL.
608  *
609  * Returns true if the signal should be actually delivered, otherwise
610  * it should be dropped.
611  */
612 static int prepare_signal(int sig, struct task_struct *p)
613 {
614 	struct signal_struct *signal = p->signal;
615 	struct task_struct *t;
616 
617 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
618 		/*
619 		 * The process is in the middle of dying, nothing to do.
620 		 */
621 	} else if (sig_kernel_stop(sig)) {
622 		/*
623 		 * This is a stop signal.  Remove SIGCONT from all queues.
624 		 */
625 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
626 		t = p;
627 		do {
628 			rm_from_queue(sigmask(SIGCONT), &t->pending);
629 		} while_each_thread(p, t);
630 	} else if (sig == SIGCONT) {
631 		unsigned int why;
632 		/*
633 		 * Remove all stop signals from all queues,
634 		 * and wake all threads.
635 		 */
636 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
637 		t = p;
638 		do {
639 			unsigned int state;
640 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
641 			/*
642 			 * If there is a handler for SIGCONT, we must make
643 			 * sure that no thread returns to user mode before
644 			 * we post the signal, in case it was the only
645 			 * thread eligible to run the signal handler--then
646 			 * it must not do anything between resuming and
647 			 * running the handler.  With the TIF_SIGPENDING
648 			 * flag set, the thread will pause and acquire the
649 			 * siglock that we hold now and until we've queued
650 			 * the pending signal.
651 			 *
652 			 * Wake up the stopped thread _after_ setting
653 			 * TIF_SIGPENDING
654 			 */
655 			state = __TASK_STOPPED;
656 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
657 				set_tsk_thread_flag(t, TIF_SIGPENDING);
658 				state |= TASK_INTERRUPTIBLE;
659 			}
660 			wake_up_state(t, state);
661 		} while_each_thread(p, t);
662 
663 		/*
664 		 * Notify the parent with CLD_CONTINUED if we were stopped.
665 		 *
666 		 * If we were in the middle of a group stop, we pretend it
667 		 * was already finished, and then continued. Since SIGCHLD
668 		 * doesn't queue we report only CLD_STOPPED, as if the next
669 		 * CLD_CONTINUED was dropped.
670 		 */
671 		why = 0;
672 		if (signal->flags & SIGNAL_STOP_STOPPED)
673 			why |= SIGNAL_CLD_CONTINUED;
674 		else if (signal->group_stop_count)
675 			why |= SIGNAL_CLD_STOPPED;
676 
677 		if (why) {
678 			/*
679 			 * The first thread which returns from finish_stop()
680 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
681 			 * notify its parent. See get_signal_to_deliver().
682 			 */
683 			signal->flags = why | SIGNAL_STOP_CONTINUED;
684 			signal->group_stop_count = 0;
685 			signal->group_exit_code = 0;
686 		} else {
687 			/*
688 			 * We are not stopped, but there could be a stop
689 			 * signal in the middle of being processed after
690 			 * being removed from the queue.  Clear that too.
691 			 */
692 			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
693 		}
694 	}
695 
696 	return !sig_ignored(p, sig);
697 }
698 
699 /*
700  * Test if P wants to take SIG.  After we've checked all threads with this,
701  * it's equivalent to finding no threads not blocking SIG.  Any threads not
702  * blocking SIG were ruled out because they are not running and already
703  * have pending signals.  Such threads will dequeue from the shared queue
704  * as soon as they're available, so putting the signal on the shared queue
705  * will be equivalent to sending it to one such thread.
706  */
707 static inline int wants_signal(int sig, struct task_struct *p)
708 {
709 	if (sigismember(&p->blocked, sig))
710 		return 0;
711 	if (p->flags & PF_EXITING)
712 		return 0;
713 	if (sig == SIGKILL)
714 		return 1;
715 	if (task_is_stopped_or_traced(p))
716 		return 0;
717 	return task_curr(p) || !signal_pending(p);
718 }
719 
720 static void complete_signal(int sig, struct task_struct *p, int group)
721 {
722 	struct signal_struct *signal = p->signal;
723 	struct task_struct *t;
724 
725 	/*
726 	 * Now find a thread we can wake up to take the signal off the queue.
727 	 *
728 	 * If the main thread wants the signal, it gets first crack.
729 	 * Probably the least surprising to the average bear.
730 	 */
731 	if (wants_signal(sig, p))
732 		t = p;
733 	else if (!group || thread_group_empty(p))
734 		/*
735 		 * There is just one thread and it does not need to be woken.
736 		 * It will dequeue unblocked signals before it runs again.
737 		 */
738 		return;
739 	else {
740 		/*
741 		 * Otherwise try to find a suitable thread.
742 		 */
743 		t = signal->curr_target;
744 		while (!wants_signal(sig, t)) {
745 			t = next_thread(t);
746 			if (t == signal->curr_target)
747 				/*
748 				 * No thread needs to be woken.
749 				 * Any eligible threads will see
750 				 * the signal in the queue soon.
751 				 */
752 				return;
753 		}
754 		signal->curr_target = t;
755 	}
756 
757 	/*
758 	 * Found a killable thread.  If the signal will be fatal,
759 	 * then start taking the whole group down immediately.
760 	 */
761 	if (sig_fatal(p, sig) &&
762 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
763 	    !sigismember(&t->real_blocked, sig) &&
764 	    (sig == SIGKILL ||
765 	     !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
766 		/*
767 		 * This signal will be fatal to the whole group.
768 		 */
769 		if (!sig_kernel_coredump(sig)) {
770 			/*
771 			 * Start a group exit and wake everybody up.
772 			 * This way we don't have other threads
773 			 * running and doing things after a slower
774 			 * thread has the fatal signal pending.
775 			 */
776 			signal->flags = SIGNAL_GROUP_EXIT;
777 			signal->group_exit_code = sig;
778 			signal->group_stop_count = 0;
779 			t = p;
780 			do {
781 				sigaddset(&t->pending.signal, SIGKILL);
782 				signal_wake_up(t, 1);
783 			} while_each_thread(p, t);
784 			return;
785 		}
786 	}
787 
788 	/*
789 	 * The signal is already in the shared-pending queue.
790 	 * Tell the chosen thread to wake up and dequeue it.
791 	 */
792 	signal_wake_up(t, sig == SIGKILL);
793 	return;
794 }
795 
796 static inline int legacy_queue(struct sigpending *signals, int sig)
797 {
798 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
799 }
800 
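
/*
 * Example of the semantics above (illustration only): if a task blocks
 * SIGUSR1 and two SIGUSR1s are sent before it unblocks, the second sender
 * finds the signal already pending, legacy_queue() returns true and
 * send_signal() drops it, so exactly one SIGUSR1 is seen.  Real-time
 * signals (>= SIGRTMIN) are never coalesced this way.
 */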
801 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
802 			int group)
803 {
804 	struct sigpending *pending;
805 	struct sigqueue *q;
806 
807 	trace_sched_signal_send(sig, t);
808 
809 	assert_spin_locked(&t->sighand->siglock);
810 	if (!prepare_signal(sig, t))
811 		return 0;
812 
813 	pending = group ? &t->signal->shared_pending : &t->pending;
814 	/*
815 	 * Short-circuit ignored signals and support queuing
816 	 * exactly one non-rt signal, so that we can get more
817 	 * detailed information about the cause of the signal.
818 	 */
819 	if (legacy_queue(pending, sig))
820 		return 0;
821 	/*
822 	 * fast-pathed signals for kernel-internal things like SIGSTOP
823 	 * or SIGKILL.
824 	 */
825 	if (info == SEND_SIG_FORCED)
826 		goto out_set;
827 
828 	/* Real-time signals must be queued if sent by sigqueue, or
829 	   some other real-time mechanism.  It is implementation
830 	   defined whether kill() does so.  We attempt to do so, on
831 	   the principle of least surprise, but since kill is not
832 	   allowed to fail with EAGAIN when low on memory we just
833 	   make sure at least one signal gets delivered and don't
834 	   pass on the info struct.  */
835 
836 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
837 					     (is_si_special(info) ||
838 					      info->si_code >= 0)));
839 	if (q) {
840 		list_add_tail(&q->list, &pending->list);
841 		switch ((unsigned long) info) {
842 		case (unsigned long) SEND_SIG_NOINFO:
843 			q->info.si_signo = sig;
844 			q->info.si_errno = 0;
845 			q->info.si_code = SI_USER;
846 			q->info.si_pid = task_pid_vnr(current);
847 			q->info.si_uid = current->uid;
848 			break;
849 		case (unsigned long) SEND_SIG_PRIV:
850 			q->info.si_signo = sig;
851 			q->info.si_errno = 0;
852 			q->info.si_code = SI_KERNEL;
853 			q->info.si_pid = 0;
854 			q->info.si_uid = 0;
855 			break;
856 		default:
857 			copy_siginfo(&q->info, info);
858 			break;
859 		}
860 	} else if (!is_si_special(info)) {
861 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
862 		/*
863 		 * Queue overflow, abort.  We may abort if the signal was rt
864 		 * and sent by user using something other than kill().
865 		 */
866 			return -EAGAIN;
867 	}
868 
869 out_set:
870 	signalfd_notify(t, sig);
871 	sigaddset(&pending->signal, sig);
872 	complete_signal(sig, t, group);
873 	return 0;
874 }
875 
876 int print_fatal_signals;
877 
878 static void print_fatal_signal(struct pt_regs *regs, int signr)
879 {
880 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
881 		current->comm, task_pid_nr(current), signr);
882 
883 #if defined(__i386__) && !defined(__arch_um__)
884 	printk("code at %08lx: ", regs->ip);
885 	{
886 		int i;
887 		for (i = 0; i < 16; i++) {
888 			unsigned char insn;
889 
890 			__get_user(insn, (unsigned char *)(regs->ip + i));
891 			printk("%02x ", insn);
892 		}
893 	}
894 #endif
895 	printk("\n");
896 	show_regs(regs);
897 }
898 
899 static int __init setup_print_fatal_signals(char *str)
900 {
901 	get_option (&str, &print_fatal_signals);
902 
903 	return 1;
904 }
905 
906 __setup("print-fatal-signals=", setup_print_fatal_signals);
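
/*
 * Booting with "print-fatal-signals=1" enables the diagnostic above: when
 * a core-dump class signal (see sig_kernel_coredump()) takes a process
 * down, get_signal_to_deliver() calls print_fatal_signal(), which logs the
 * task and signal number and, on x86-32, the opcode bytes at the faulting
 * instruction pointer.
 */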
907 
908 int
909 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
910 {
911 	return send_signal(sig, info, p, 1);
912 }
913 
914 static int
915 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
916 {
917 	return send_signal(sig, info, t, 0);
918 }
919 
920 /*
921  * Force a signal that the process can't ignore: if necessary
922  * we unblock the signal and change any SIG_IGN to SIG_DFL.
923  *
924  * Note: If we unblock the signal, we always reset it to SIG_DFL,
925  * since we do not want to have a signal handler that was blocked
926  * be invoked when user space had explicitly blocked it.
927  *
928  * We don't want to have recursive SIGSEGV's etc, for example,
929  * that is why we also clear SIGNAL_UNKILLABLE.
930  */
931 int
932 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
933 {
934 	unsigned long int flags;
935 	int ret, blocked, ignored;
936 	struct k_sigaction *action;
937 
938 	spin_lock_irqsave(&t->sighand->siglock, flags);
939 	action = &t->sighand->action[sig-1];
940 	ignored = action->sa.sa_handler == SIG_IGN;
941 	blocked = sigismember(&t->blocked, sig);
942 	if (blocked || ignored) {
943 		action->sa.sa_handler = SIG_DFL;
944 		if (blocked) {
945 			sigdelset(&t->blocked, sig);
946 			recalc_sigpending_and_wake(t);
947 		}
948 	}
949 	if (action->sa.sa_handler == SIG_DFL)
950 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
951 	ret = specific_send_sig_info(sig, info, t);
952 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
953 
954 	return ret;
955 }
956 
957 void
958 force_sig_specific(int sig, struct task_struct *t)
959 {
960 	force_sig_info(sig, SEND_SIG_FORCED, t);
961 }
962 
963 /*
964  * Nuke all other threads in the group.
965  */
966 void zap_other_threads(struct task_struct *p)
967 {
968 	struct task_struct *t;
969 
970 	p->signal->group_stop_count = 0;
971 
972 	for (t = next_thread(p); t != p; t = next_thread(t)) {
973 		/*
974 		 * Don't bother with already dead threads
975 		 */
976 		if (t->exit_state)
977 			continue;
978 
979 		/* SIGKILL will be handled before any pending SIGSTOP */
980 		sigaddset(&t->pending.signal, SIGKILL);
981 		signal_wake_up(t, 1);
982 	}
983 }
984 
985 int __fatal_signal_pending(struct task_struct *tsk)
986 {
987 	return sigismember(&tsk->pending.signal, SIGKILL);
988 }
989 EXPORT_SYMBOL(__fatal_signal_pending);
990 
991 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
992 {
993 	struct sighand_struct *sighand;
994 
995 	rcu_read_lock();
996 	for (;;) {
997 		sighand = rcu_dereference(tsk->sighand);
998 		if (unlikely(sighand == NULL))
999 			break;
1000 
1001 		spin_lock_irqsave(&sighand->siglock, *flags);
1002 		if (likely(sighand == tsk->sighand))
1003 			break;
1004 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1005 	}
1006 	rcu_read_unlock();
1007 
1008 	return sighand;
1009 }
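
/*
 * Note on the retry loop above: tsk->sighand may change (for instance
 * during exec) or become NULL once the task is released, between the
 * RCU-protected load and taking the lock, so lock_task_sighand() only
 * returns with siglock held on a sighand that is still tsk's, or NULL.
 */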
1010 
1011 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1012 {
1013 	unsigned long flags;
1014 	int ret;
1015 
1016 	ret = check_kill_permission(sig, info, p);
1017 
1018 	if (!ret && sig) {
1019 		ret = -ESRCH;
1020 		if (lock_task_sighand(p, &flags)) {
1021 			ret = __group_send_sig_info(sig, info, p);
1022 			unlock_task_sighand(p, &flags);
1023 		}
1024 	}
1025 
1026 	return ret;
1027 }
1028 
1029 /*
1030  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1031  * control characters do (^C, ^Z etc)
1032  */
1033 
1034 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1035 {
1036 	struct task_struct *p = NULL;
1037 	int retval, success;
1038 
1039 	success = 0;
1040 	retval = -ESRCH;
1041 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1042 		int err = group_send_sig_info(sig, info, p);
1043 		success |= !err;
1044 		retval = err;
1045 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1046 	return success ? 0 : retval;
1047 }
1048 
1049 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1050 {
1051 	int error = -ESRCH;
1052 	struct task_struct *p;
1053 
1054 	rcu_read_lock();
1055 retry:
1056 	p = pid_task(pid, PIDTYPE_PID);
1057 	if (p) {
1058 		error = group_send_sig_info(sig, info, p);
1059 		if (unlikely(error == -ESRCH))
1060 			/*
1061 			 * The task was unhashed in between, try again.
1061 			 * The task was unhashed in between; try again.
1062 			 * If it is dead, pid_task() will return NULL;
1063 			 * if we race with de_thread() it will find the
1064 			 * new leader.
1065 			 */
1066 			goto retry;
1067 	}
1068 	rcu_read_unlock();
1069 
1070 	return error;
1071 }
1072 
1073 int
1074 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1075 {
1076 	int error;
1077 	rcu_read_lock();
1078 	error = kill_pid_info(sig, info, find_vpid(pid));
1079 	rcu_read_unlock();
1080 	return error;
1081 }
1082 
1083 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1084 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1085 		      uid_t uid, uid_t euid, u32 secid)
1086 {
1087 	int ret = -EINVAL;
1088 	struct task_struct *p;
1089 
1090 	if (!valid_signal(sig))
1091 		return ret;
1092 
1093 	read_lock(&tasklist_lock);
1094 	p = pid_task(pid, PIDTYPE_PID);
1095 	if (!p) {
1096 		ret = -ESRCH;
1097 		goto out_unlock;
1098 	}
1099 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1100 	    && (euid != p->suid) && (euid != p->uid)
1101 	    && (uid != p->suid) && (uid != p->uid)) {
1102 		ret = -EPERM;
1103 		goto out_unlock;
1104 	}
1105 	ret = security_task_kill(p, info, sig, secid);
1106 	if (ret)
1107 		goto out_unlock;
1108 	if (sig && p->sighand) {
1109 		unsigned long flags;
1110 		spin_lock_irqsave(&p->sighand->siglock, flags);
1111 		ret = __group_send_sig_info(sig, info, p);
1112 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1113 	}
1114 out_unlock:
1115 	read_unlock(&tasklist_lock);
1116 	return ret;
1117 }
1118 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1119 
1120 /*
1121  * kill_something_info() interprets pid in interesting ways just like kill(2).
1122  *
1123  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1124  * is probably wrong.  Should make it like BSD or SYSV.
1125  */
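
/*
 * As implemented below, the pid argument selects the targets as follows:
 *   pid >  0  - the single process with that (virtual) pid
 *   pid == 0  - every process in the caller's process group
 *   pid == -1 - every process except the caller's thread group and init
 *   pid < -1  - every process in the process group -pid
 */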
1126 
1127 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1128 {
1129 	int ret;
1130 
1131 	if (pid > 0) {
1132 		rcu_read_lock();
1133 		ret = kill_pid_info(sig, info, find_vpid(pid));
1134 		rcu_read_unlock();
1135 		return ret;
1136 	}
1137 
1138 	read_lock(&tasklist_lock);
1139 	if (pid != -1) {
1140 		ret = __kill_pgrp_info(sig, info,
1141 				pid ? find_vpid(-pid) : task_pgrp(current));
1142 	} else {
1143 		int retval = 0, count = 0;
1144 		struct task_struct * p;
1145 
1146 		for_each_process(p) {
1147 			if (task_pid_vnr(p) > 1 &&
1148 					!same_thread_group(p, current)) {
1149 				int err = group_send_sig_info(sig, info, p);
1150 				++count;
1151 				if (err != -EPERM)
1152 					retval = err;
1153 			}
1154 		}
1155 		ret = count ? retval : -ESRCH;
1156 	}
1157 	read_unlock(&tasklist_lock);
1158 
1159 	return ret;
1160 }
1161 
1162 /*
1163  * These are for backward compatibility with the rest of the kernel source.
1164  */
1165 
1166 /*
1167  * The caller must ensure the task can't exit.
1168  */
1169 int
1170 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1171 {
1172 	int ret;
1173 	unsigned long flags;
1174 
1175 	/*
1176 	 * Make sure legacy kernel users don't send in bad values
1177 	 * (normal paths check this in check_kill_permission).
1178 	 */
1179 	if (!valid_signal(sig))
1180 		return -EINVAL;
1181 
1182 	spin_lock_irqsave(&p->sighand->siglock, flags);
1183 	ret = specific_send_sig_info(sig, info, p);
1184 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1185 	return ret;
1186 }
1187 
1188 #define __si_special(priv) \
1189 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1190 
1191 int
1192 send_sig(int sig, struct task_struct *p, int priv)
1193 {
1194 	return send_sig_info(sig, __si_special(priv), p);
1195 }
1196 
1197 void
1198 force_sig(int sig, struct task_struct *p)
1199 {
1200 	force_sig_info(sig, SEND_SIG_PRIV, p);
1201 }
1202 
1203 /*
1204  * When things go south during signal handling, we
1205  * will force a SIGSEGV. And if the signal that caused
1206  * the problem was already a SIGSEGV, we'll want to
1207  * make sure we don't even try to deliver the signal..
1208  */
1209 int
1210 force_sigsegv(int sig, struct task_struct *p)
1211 {
1212 	if (sig == SIGSEGV) {
1213 		unsigned long flags;
1214 		spin_lock_irqsave(&p->sighand->siglock, flags);
1215 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1216 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1217 	}
1218 	force_sig(SIGSEGV, p);
1219 	return 0;
1220 }
1221 
1222 int kill_pgrp(struct pid *pid, int sig, int priv)
1223 {
1224 	int ret;
1225 
1226 	read_lock(&tasklist_lock);
1227 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1228 	read_unlock(&tasklist_lock);
1229 
1230 	return ret;
1231 }
1232 EXPORT_SYMBOL(kill_pgrp);
1233 
1234 int kill_pid(struct pid *pid, int sig, int priv)
1235 {
1236 	return kill_pid_info(sig, __si_special(priv), pid);
1237 }
1238 EXPORT_SYMBOL(kill_pid);
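
/*
 * Usage sketch (illustrative only): kernel code that knows a pid_t, here
 * called nr, can signal the corresponding process roughly like this,
 * using the standard pid helpers:
 *
 *	struct pid *pid = find_get_pid(nr);
 *
 *	if (pid) {
 *		kill_pid(pid, SIGTERM, 1);	(priv = 1, i.e. SEND_SIG_PRIV)
 *		put_pid(pid);
 *	}
 */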
1239 
1240 /*
1241  * These functions support sending signals using preallocated sigqueue
1242  * structures.  This is needed "because realtime applications cannot
1243  * afford to lose notifications of asynchronous events, like timer
1244  * expirations or I/O completions".  In the case of POSIX timers
1245  * we allocate the sigqueue structure in timer_create().  If this
1246  * allocation fails we are able to report the failure to the application
1247  * with an EAGAIN error.
1248  */
1249 
1250 struct sigqueue *sigqueue_alloc(void)
1251 {
1252 	struct sigqueue *q;
1253 
1254 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1255 		q->flags |= SIGQUEUE_PREALLOC;
1256 	return q;
1257 }
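
/*
 * Typical lifecycle, per the comment above: timer_create() preallocates
 * the sigqueue with sigqueue_alloc() so a failure can be reported as
 * EAGAIN up front, each timer expiry delivers it with send_sigqueue(),
 * and timer deletion releases it with sigqueue_free().
 */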
1258 
1259 void sigqueue_free(struct sigqueue *q)
1260 {
1261 	unsigned long flags;
1262 	spinlock_t *lock = &current->sighand->siglock;
1263 
1264 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1265 	/*
1266 	 * We must hold ->siglock while testing q->list
1267 	 * to serialize with collect_signal() or with
1268 	 * __exit_signal()->flush_sigqueue().
1269 	 */
1270 	spin_lock_irqsave(lock, flags);
1271 	q->flags &= ~SIGQUEUE_PREALLOC;
1272 	/*
1273 	 * If it is queued it will be freed when dequeued,
1274 	 * like the "regular" sigqueue.
1275 	 */
1276 	if (!list_empty(&q->list))
1277 		q = NULL;
1278 	spin_unlock_irqrestore(lock, flags);
1279 
1280 	if (q)
1281 		__sigqueue_free(q);
1282 }
1283 
1284 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1285 {
1286 	int sig = q->info.si_signo;
1287 	struct sigpending *pending;
1288 	unsigned long flags;
1289 	int ret;
1290 
1291 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1292 
1293 	ret = -1;
1294 	if (!likely(lock_task_sighand(t, &flags)))
1295 		goto ret;
1296 
1297 	ret = 1; /* the signal is ignored */
1298 	if (!prepare_signal(sig, t))
1299 		goto out;
1300 
1301 	ret = 0;
1302 	if (unlikely(!list_empty(&q->list))) {
1303 		/*
1304 		 * If an SI_TIMER entry is already queued, just increment
1305 		 * the overrun count.
1306 		 */
1307 		BUG_ON(q->info.si_code != SI_TIMER);
1308 		q->info.si_overrun++;
1309 		goto out;
1310 	}
1311 	q->info.si_overrun = 0;
1312 
1313 	signalfd_notify(t, sig);
1314 	pending = group ? &t->signal->shared_pending : &t->pending;
1315 	list_add_tail(&q->list, &pending->list);
1316 	sigaddset(&pending->signal, sig);
1317 	complete_signal(sig, t, group);
1318 out:
1319 	unlock_task_sighand(t, &flags);
1320 ret:
1321 	return ret;
1322 }
1323 
1324 /*
1325  * Wake up any threads in the parent blocked in wait* syscalls.
1326  */
1327 static inline void __wake_up_parent(struct task_struct *p,
1328 				    struct task_struct *parent)
1329 {
1330 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1331 }
1332 
1333 /*
1334  * Let a parent know about the death of a child.
1335  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1336  *
1337  * Returns -1 if our parent ignored us and so we've switched to
1338  * self-reaping, or else @sig.
1339  */
1340 int do_notify_parent(struct task_struct *tsk, int sig)
1341 {
1342 	struct siginfo info;
1343 	unsigned long flags;
1344 	struct sighand_struct *psig;
1345 	struct task_cputime cputime;
1346 	int ret = sig;
1347 
1348 	BUG_ON(sig == -1);
1349 
1350  	/* do_notify_parent_cldstop should have been called instead.  */
1351  	BUG_ON(task_is_stopped_or_traced(tsk));
1352 
1353 	BUG_ON(!tsk->ptrace &&
1354 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1355 
1356 	info.si_signo = sig;
1357 	info.si_errno = 0;
1358 	/*
1359 	 * We are under tasklist_lock here so our parent is tied to
1360 	 * us and cannot exit and release its namespace.
1361 	 *
1362 	 * The only thing it can do is switch its nsproxy with sys_unshare(),
1363 	 * but unsharing pid namespaces is not allowed, so we'll always
1364 	 * see the relevant namespace.
1365 	 *
1366 	 * write_lock() currently calls preempt_disable() which is the
1367 	 * same as rcu_read_lock(), but according to Oleg it is not
1368 	 * correct to rely on this.
1369 	 */
1370 	rcu_read_lock();
1371 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1372 	rcu_read_unlock();
1373 
1374 	info.si_uid = tsk->uid;
1375 
1376 	thread_group_cputime(tsk, &cputime);
1377 	info.si_utime = cputime_to_jiffies(cputime.utime);
1378 	info.si_stime = cputime_to_jiffies(cputime.stime);
1379 
1380 	info.si_status = tsk->exit_code & 0x7f;
1381 	if (tsk->exit_code & 0x80)
1382 		info.si_code = CLD_DUMPED;
1383 	else if (tsk->exit_code & 0x7f)
1384 		info.si_code = CLD_KILLED;
1385 	else {
1386 		info.si_code = CLD_EXITED;
1387 		info.si_status = tsk->exit_code >> 8;
1388 	}
1389 
1390 	psig = tsk->parent->sighand;
1391 	spin_lock_irqsave(&psig->siglock, flags);
1392 	if (!tsk->ptrace && sig == SIGCHLD &&
1393 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1394 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1395 		/*
1396 		 * We are exiting and our parent doesn't care.  POSIX.1
1397 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1398 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1399 		 * automatically and not left for our parent's wait4 call.
1400 		 * Rather than having the parent do it as a magic kind of
1401 		 * signal handler, we just set this to tell do_exit that we
1402 		 * can be cleaned up without becoming a zombie.  Note that
1403 		 * we still call __wake_up_parent in this case, because a
1404 		 * blocked sys_wait4 might now return -ECHILD.
1405 		 *
1406 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1407 		 * is implementation-defined: we do (if you don't want
1408 		 * it, just use SIG_IGN instead).
1409 		 */
1410 		ret = tsk->exit_signal = -1;
1411 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1412 			sig = -1;
1413 	}
1414 	if (valid_signal(sig) && sig > 0)
1415 		__group_send_sig_info(sig, &info, tsk->parent);
1416 	__wake_up_parent(tsk, tsk->parent);
1417 	spin_unlock_irqrestore(&psig->siglock, flags);
1418 
1419 	return ret;
1420 }
1421 
1422 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1423 {
1424 	struct siginfo info;
1425 	unsigned long flags;
1426 	struct task_struct *parent;
1427 	struct sighand_struct *sighand;
1428 
1429 	if (tsk->ptrace & PT_PTRACED)
1430 		parent = tsk->parent;
1431 	else {
1432 		tsk = tsk->group_leader;
1433 		parent = tsk->real_parent;
1434 	}
1435 
1436 	info.si_signo = SIGCHLD;
1437 	info.si_errno = 0;
1438 	/*
1439 	 * see comment in do_notify_parent() about the following 3 lines
1440 	 */
1441 	rcu_read_lock();
1442 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1443 	rcu_read_unlock();
1444 
1445 	info.si_uid = tsk->uid;
1446 
1447 	info.si_utime = cputime_to_clock_t(tsk->utime);
1448 	info.si_stime = cputime_to_clock_t(tsk->stime);
1449 
1450  	info.si_code = why;
1451  	switch (why) {
1452  	case CLD_CONTINUED:
1453  		info.si_status = SIGCONT;
1454  		break;
1455  	case CLD_STOPPED:
1456  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1457  		break;
1458  	case CLD_TRAPPED:
1459  		info.si_status = tsk->exit_code & 0x7f;
1460  		break;
1461  	default:
1462  		BUG();
1463  	}
1464 
1465 	sighand = parent->sighand;
1466 	spin_lock_irqsave(&sighand->siglock, flags);
1467 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1468 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1469 		__group_send_sig_info(SIGCHLD, &info, parent);
1470 	/*
1471 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1472 	 */
1473 	__wake_up_parent(tsk, parent);
1474 	spin_unlock_irqrestore(&sighand->siglock, flags);
1475 }
1476 
1477 static inline int may_ptrace_stop(void)
1478 {
1479 	if (!likely(current->ptrace & PT_PTRACED))
1480 		return 0;
1481 	/*
1482 	 * Are we in the middle of do_coredump?
1483 	 * If so, and our tracer is also part of the coredump, stopping
1484 	 * is a deadlock situation and pointless because our tracer
1485 	 * is dead, so don't allow us to stop.
1486 	 * If SIGKILL was already sent before the caller unlocked
1487 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1488 	 * is safe to enter schedule().
1489 	 */
1490 	if (unlikely(current->mm->core_state) &&
1491 	    unlikely(current->mm == current->parent->mm))
1492 		return 0;
1493 
1494 	return 1;
1495 }
1496 
1497 /*
1498  * Return nonzero if there is a SIGKILL that should be waking us up.
1499  * Called with the siglock held.
1500  */
1501 static int sigkill_pending(struct task_struct *tsk)
1502 {
1503 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1504 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1505 }
1506 
1507 /*
1508  * This must be called with current->sighand->siglock held.
1509  *
1510  * This should be the path for all ptrace stops.
1511  * We always set current->last_siginfo while stopped here.
1512  * That makes it a way to test a stopped process for
1513  * being ptrace-stopped vs being job-control-stopped.
1514  *
1515  * If we actually decide not to stop at all because the tracer
1516  * is gone, we keep current->exit_code unless clear_code.
1517  */
1518 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1519 {
1520 	if (arch_ptrace_stop_needed(exit_code, info)) {
1521 		/*
1522 		 * The arch code has something special to do before a
1523 		 * ptrace stop.  This is allowed to block, e.g. for faults
1524 		 * on user stack pages.  We can't keep the siglock while
1525 		 * calling arch_ptrace_stop, so we must release it now.
1526 		 * To preserve proper semantics, we must do this before
1527 		 * any signal bookkeeping like checking group_stop_count.
1528 		 * Meanwhile, a SIGKILL could come in before we retake the
1529 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1530 		 * So after regaining the lock, we must check for SIGKILL.
1531 		 */
1532 		spin_unlock_irq(&current->sighand->siglock);
1533 		arch_ptrace_stop(exit_code, info);
1534 		spin_lock_irq(&current->sighand->siglock);
1535 		if (sigkill_pending(current))
1536 			return;
1537 	}
1538 
1539 	/*
1540 	 * If there is a group stop in progress,
1541 	 * we must participate in the bookkeeping.
1542 	 */
1543 	if (current->signal->group_stop_count > 0)
1544 		--current->signal->group_stop_count;
1545 
1546 	current->last_siginfo = info;
1547 	current->exit_code = exit_code;
1548 
1549 	/* Let the debugger run.  */
1550 	__set_current_state(TASK_TRACED);
1551 	spin_unlock_irq(&current->sighand->siglock);
1552 	read_lock(&tasklist_lock);
1553 	if (may_ptrace_stop()) {
1554 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1555 		read_unlock(&tasklist_lock);
1556 		schedule();
1557 	} else {
1558 		/*
1559 		 * By the time we got the lock, our tracer went away.
1560 		 * Don't drop the lock yet, another tracer may come.
1561 		 */
1562 		__set_current_state(TASK_RUNNING);
1563 		if (clear_code)
1564 			current->exit_code = 0;
1565 		read_unlock(&tasklist_lock);
1566 	}
1567 
1568 	/*
1569 	 * While in TASK_TRACED, we were considered "frozen enough".
1570 	 * Now that we woke up, it's crucial if we're supposed to be
1571 	 * frozen that we freeze now before running anything substantial.
1572 	 */
1573 	try_to_freeze();
1574 
1575 	/*
1576 	 * We are back.  Now reacquire the siglock before touching
1577 	 * last_siginfo, so that we are sure to have synchronized with
1578 	 * any signal-sending on another CPU that wants to examine it.
1579 	 */
1580 	spin_lock_irq(&current->sighand->siglock);
1581 	current->last_siginfo = NULL;
1582 
1583 	/*
1584 	 * Queued signals ignored us while we were stopped for tracing.
1585 	 * So check for any that we should take before resuming user mode.
1586 	 * This sets TIF_SIGPENDING, but never clears it.
1587 	 */
1588 	recalc_sigpending_tsk(current);
1589 }
1590 
1591 void ptrace_notify(int exit_code)
1592 {
1593 	siginfo_t info;
1594 
1595 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1596 
1597 	memset(&info, 0, sizeof info);
1598 	info.si_signo = SIGTRAP;
1599 	info.si_code = exit_code;
1600 	info.si_pid = task_pid_vnr(current);
1601 	info.si_uid = current->uid;
1602 
1603 	/* Let the debugger run.  */
1604 	spin_lock_irq(&current->sighand->siglock);
1605 	ptrace_stop(exit_code, 1, &info);
1606 	spin_unlock_irq(&current->sighand->siglock);
1607 }
1608 
1609 static void
1610 finish_stop(int stop_count)
1611 {
1612 	/*
1613 	 * If there are no other threads in the group, or if there is
1614 	 * a group stop in progress and we are the last to stop,
1615 	 * report to the parent.  When ptraced, every thread reports itself.
1616 	 */
1617 	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1618 		read_lock(&tasklist_lock);
1619 		do_notify_parent_cldstop(current, CLD_STOPPED);
1620 		read_unlock(&tasklist_lock);
1621 	}
1622 
1623 	do {
1624 		schedule();
1625 	} while (try_to_freeze());
1626 	/*
1627 	 * Now we don't run again until continued.
1628 	 */
1629 	current->exit_code = 0;
1630 }
1631 
1632 /*
1633  * This performs the stopping for SIGSTOP and other stop signals.
1634  * We have to stop all threads in the thread group.
1635  * Returns nonzero if we've actually stopped and released the siglock.
1636  * Returns zero if we didn't stop and still hold the siglock.
1637  */
1638 static int do_signal_stop(int signr)
1639 {
1640 	struct signal_struct *sig = current->signal;
1641 	int stop_count;
1642 
1643 	if (sig->group_stop_count > 0) {
1644 		/*
1645 		 * There is a group stop in progress.  We don't need to
1646 		 * start another one.
1647 		 */
1648 		stop_count = --sig->group_stop_count;
1649 	} else {
1650 		struct task_struct *t;
1651 
1652 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1653 		    unlikely(signal_group_exit(sig)))
1654 			return 0;
1655 		/*
1656 		 * There is no group stop already in progress.
1657 		 * We must initiate one now.
1658 		 */
1659 		sig->group_exit_code = signr;
1660 
1661 		stop_count = 0;
1662 		for (t = next_thread(current); t != current; t = next_thread(t))
1663 			/*
1664 			 * Setting state to TASK_STOPPED for a group
1665 			 * stop is always done with the siglock held,
1666 			 * so this check has no races.
1667 			 */
1668 			if (!(t->flags & PF_EXITING) &&
1669 			    !task_is_stopped_or_traced(t)) {
1670 				stop_count++;
1671 				signal_wake_up(t, 0);
1672 			}
1673 		sig->group_stop_count = stop_count;
1674 	}
1675 
1676 	if (stop_count == 0)
1677 		sig->flags = SIGNAL_STOP_STOPPED;
1678 	current->exit_code = sig->group_exit_code;
1679 	__set_current_state(TASK_STOPPED);
1680 
1681 	spin_unlock_irq(&current->sighand->siglock);
1682 	finish_stop(stop_count);
1683 	return 1;
1684 }
1685 
1686 static int ptrace_signal(int signr, siginfo_t *info,
1687 			 struct pt_regs *regs, void *cookie)
1688 {
1689 	if (!(current->ptrace & PT_PTRACED))
1690 		return signr;
1691 
1692 	ptrace_signal_deliver(regs, cookie);
1693 
1694 	/* Let the debugger run.  */
1695 	ptrace_stop(signr, 0, info);
1696 
1697 	/* We're back.  Did the debugger cancel the sig?  */
1698 	signr = current->exit_code;
1699 	if (signr == 0)
1700 		return signr;
1701 
1702 	current->exit_code = 0;
1703 
1704 	/* Update the siginfo structure if the signal has
1705 	   changed.  If the debugger wanted something
1706 	   specific in the siginfo structure then it should
1707 	   have updated *info via PTRACE_SETSIGINFO.  */
1708 	if (signr != info->si_signo) {
1709 		info->si_signo = signr;
1710 		info->si_errno = 0;
1711 		info->si_code = SI_USER;
1712 		info->si_pid = task_pid_vnr(current->parent);
1713 		info->si_uid = current->parent->uid;
1714 	}
1715 
1716 	/* If the (new) signal is now blocked, requeue it.  */
1717 	if (sigismember(&current->blocked, signr)) {
1718 		specific_send_sig_info(signr, info, current);
1719 		signr = 0;
1720 	}
1721 
1722 	return signr;
1723 }
1724 
1725 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1726 			  struct pt_regs *regs, void *cookie)
1727 {
1728 	struct sighand_struct *sighand = current->sighand;
1729 	struct signal_struct *signal = current->signal;
1730 	int signr;
1731 
1732 relock:
1733 	/*
1734 	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1735 	 * While in TASK_STOPPED, we were considered "frozen enough".
1736 	 * Now that we woke up, it's crucial if we're supposed to be
1737 	 * frozen that we freeze now before running anything substantial.
1738 	 */
1739 	try_to_freeze();
1740 
1741 	spin_lock_irq(&sighand->siglock);
1742 	/*
1743 	 * Every stopped thread goes here after wakeup. Check to see if
1744 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
1745 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1746 	 */
1747 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1748 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1749 				? CLD_CONTINUED : CLD_STOPPED;
1750 		signal->flags &= ~SIGNAL_CLD_MASK;
1751 		spin_unlock_irq(&sighand->siglock);
1752 
1753 		if (unlikely(!tracehook_notify_jctl(1, why)))
1754 			goto relock;
1755 
1756 		read_lock(&tasklist_lock);
1757 		do_notify_parent_cldstop(current->group_leader, why);
1758 		read_unlock(&tasklist_lock);
1759 		goto relock;
1760 	}
1761 
1762 	for (;;) {
1763 		struct k_sigaction *ka;
1764 
1765 		if (unlikely(signal->group_stop_count > 0) &&
1766 		    do_signal_stop(0))
1767 			goto relock;
1768 
1769 		/*
1770 		 * Tracing can induce an artificial signal and choose the sigaction.
1771 		 * The return value in @signr determines the default action,
1772 		 * but @info->si_signo is the signal number we will report.
1773 		 */
1774 		signr = tracehook_get_signal(current, regs, info, return_ka);
1775 		if (unlikely(signr < 0))
1776 			goto relock;
1777 		if (unlikely(signr != 0))
1778 			ka = return_ka;
1779 		else {
1780 			signr = dequeue_signal(current, &current->blocked,
1781 					       info);
1782 
1783 			if (!signr)
1784 				break; /* will return 0 */
1785 
1786 			if (signr != SIGKILL) {
1787 				signr = ptrace_signal(signr, info,
1788 						      regs, cookie);
1789 				if (!signr)
1790 					continue;
1791 			}
1792 
1793 			ka = &sighand->action[signr-1];
1794 		}
1795 
1796 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1797 			continue;
1798 		if (ka->sa.sa_handler != SIG_DFL) {
1799 			/* Run the handler.  */
1800 			*return_ka = *ka;
1801 
1802 			if (ka->sa.sa_flags & SA_ONESHOT)
1803 				ka->sa.sa_handler = SIG_DFL;
1804 
1805 			break; /* will return non-zero "signr" value */
1806 		}
1807 
1808 		/*
1809 		 * Now we are doing the default action for this signal.
1810 		 */
1811 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1812 			continue;
1813 
1814 		/*
1815 		 * Global init gets no signals it doesn't want.
1816 		 */
1817 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1818 		    !signal_group_exit(signal))
1819 			continue;
1820 
1821 		if (sig_kernel_stop(signr)) {
1822 			/*
1823 			 * The default action is to stop all threads in
1824 			 * the thread group.  The job control signals
1825 			 * do nothing in an orphaned pgrp, but SIGSTOP
1826 			 * always works.  Note that siglock needs to be
1827 			 * dropped during the call to is_orphaned_pgrp()
1828 			 * because of lock ordering with tasklist_lock.
1829 			 * This allows an intervening SIGCONT to be posted.
1830 			 * We need to check for that and bail out if necessary.
1831 			 */
1832 			if (signr != SIGSTOP) {
1833 				spin_unlock_irq(&sighand->siglock);
1834 
1835 				/* signals can be posted during this window */
1836 
1837 				if (is_current_pgrp_orphaned())
1838 					goto relock;
1839 
1840 				spin_lock_irq(&sighand->siglock);
1841 			}
1842 
1843 			if (likely(do_signal_stop(info->si_signo))) {
1844 				/* It released the siglock.  */
1845 				goto relock;
1846 			}
1847 
1848 			/*
1849 			 * We didn't actually stop, due to a race
1850 			 * with SIGCONT or something like that.
1851 			 */
1852 			continue;
1853 		}
1854 
1855 		spin_unlock_irq(&sighand->siglock);
1856 
1857 		/*
1858 		 * Anything else is fatal, maybe with a core dump.
1859 		 */
1860 		current->flags |= PF_SIGNALED;
1861 
1862 		if (sig_kernel_coredump(signr)) {
1863 			if (print_fatal_signals)
1864 				print_fatal_signal(regs, info->si_signo);
1865 			/*
1866 			 * If it was able to dump core, this kills all
1867 			 * other threads in the group and synchronizes with
1868 			 * their demise.  If we lost the race with another
1869 			 * thread getting here, it set group_exit_code
1870 			 * first and our do_group_exit call below will use
1871 			 * that value and ignore the one we pass it.
1872 			 */
1873 			do_coredump(info->si_signo, info->si_signo, regs);
1874 		}
1875 
1876 		/*
1877 		 * Death signals, no core dump.
1878 		 */
1879 		do_group_exit(info->si_signo);
1880 		/* NOTREACHED */
1881 	}
1882 	spin_unlock_irq(&sighand->siglock);
1883 	return signr;
1884 }
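
/*
 * Illustrative sketch (not part of this file): roughly how an
 * architecture's do_signal() consumes the value computed above.  A
 * positive return means "set up a user-space frame for this signal";
 * handle_signal() stands in for the arch-specific frame setup and its
 * exact argument order varies by architecture.
 *
 *	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *	if (signr > 0) {
 *		// deliver: build the signal frame and run the handler
 *		handle_signal(signr, &info, &ka, oldset, regs);
 *		return;
 *	}
 *	// no signal to deliver: restart the syscall if needed
 */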
1885 
1886 void exit_signals(struct task_struct *tsk)
1887 {
1888 	int group_stop = 0;
1889 	struct task_struct *t;
1890 
1891 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1892 		tsk->flags |= PF_EXITING;
1893 		return;
1894 	}
1895 
1896 	spin_lock_irq(&tsk->sighand->siglock);
1897 	/*
1898 	 * From now this task is not visible for group-wide signals,
1899 	 * see wants_signal(), do_signal_stop().
1900 	 */
1901 	tsk->flags |= PF_EXITING;
1902 	if (!signal_pending(tsk))
1903 		goto out;
1904 
1905 	/* It could be that __group_complete_signal() chose us to
1906 	 * notify about a group-wide signal. Another thread should be
1907 	 * woken now to take the signal since we will not.
1908 	 */
1909 	for (t = tsk; (t = next_thread(t)) != tsk; )
1910 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
1911 			recalc_sigpending_and_wake(t);
1912 
1913 	if (unlikely(tsk->signal->group_stop_count) &&
1914 			!--tsk->signal->group_stop_count) {
1915 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
1916 		group_stop = 1;
1917 	}
1918 out:
1919 	spin_unlock_irq(&tsk->sighand->siglock);
1920 
1921 	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1922 		read_lock(&tasklist_lock);
1923 		do_notify_parent_cldstop(tsk, CLD_STOPPED);
1924 		read_unlock(&tasklist_lock);
1925 	}
1926 }
1927 
1928 EXPORT_SYMBOL(recalc_sigpending);
1929 EXPORT_SYMBOL_GPL(dequeue_signal);
1930 EXPORT_SYMBOL(flush_signals);
1931 EXPORT_SYMBOL(force_sig);
1932 EXPORT_SYMBOL(send_sig);
1933 EXPORT_SYMBOL(send_sig_info);
1934 EXPORT_SYMBOL(sigprocmask);
1935 EXPORT_SYMBOL(block_all_signals);
1936 EXPORT_SYMBOL(unblock_all_signals);
1937 
1938 
1939 /*
1940  * System call entry points.
1941  */
1942 
1943 asmlinkage long sys_restart_syscall(void)
1944 {
1945 	struct restart_block *restart = &current_thread_info()->restart_block;
1946 	return restart->fn(restart);
1947 }
1948 
1949 long do_no_restart_syscall(struct restart_block *param)
1950 {
1951 	return -EINTR;
1952 }
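
/*
 * Illustrative sketch (not part of this file, field names vary by kernel
 * version): how an interruptible syscall typically arms the restart_block
 * that sys_restart_syscall() above dispatches on.  my_sleep_restart() and
 * the remaining-time bookkeeping are hypothetical.
 *
 *	static long my_sleep_restart(struct restart_block *restart)
 *	{
 *		return do_my_sleep(restart->arg0);	// resume with saved state
 *	}
 *
 *	// in the syscall, when a signal interrupts the wait:
 *	restart = &current_thread_info()->restart_block;
 *	restart->fn = my_sleep_restart;
 *	restart->arg0 = remaining;
 *	return -ERESTART_RESTARTBLOCK;
 */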
1953 
1954 /*
1955  * We don't need to get the kernel lock - this is all local to this
1956  * particular thread.. (and that's good, because this is _heavily_
1957  * particular thread (and that's good, because this is _heavily_
1958  * used by various programs).
1959 
1960 /*
1961  * This is also useful for kernel threads that want to temporarily
1962  * (or permanently) block certain signals.
1963  *
1964  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1965  * interface happily blocks "unblockable" signals like SIGKILL
1966  * and friends.
1967  */
1968 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1969 {
1970 	int error;
1971 
1972 	spin_lock_irq(&current->sighand->siglock);
1973 	if (oldset)
1974 		*oldset = current->blocked;
1975 
1976 	error = 0;
1977 	switch (how) {
1978 	case SIG_BLOCK:
1979 		sigorsets(&current->blocked, &current->blocked, set);
1980 		break;
1981 	case SIG_UNBLOCK:
1982 		signandsets(&current->blocked, &current->blocked, set);
1983 		break;
1984 	case SIG_SETMASK:
1985 		current->blocked = *set;
1986 		break;
1987 	default:
1988 		error = -EINVAL;
1989 	}
1990 	recalc_sigpending();
1991 	spin_unlock_irq(&current->sighand->siglock);
1992 
1993 	return error;
1994 }
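
/*
 * Illustrative sketch (not part of this file): a kernel thread using the
 * in-kernel sigprocmask() above to block every signal, including SIGKILL,
 * as the comment before the function notes.  my_kthread_fn() is a
 * hypothetical thread function.
 *
 *	static int my_kthread_fn(void *data)
 *	{
 *		sigset_t all;
 *
 *		sigfillset(&all);
 *		sigprocmask(SIG_BLOCK, &all, NULL);
 *
 *		while (!kthread_should_stop())
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 */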
1995 
1996 asmlinkage long
1997 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1998 {
1999 	int error = -EINVAL;
2000 	sigset_t old_set, new_set;
2001 
2002 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2003 	if (sigsetsize != sizeof(sigset_t))
2004 		goto out;
2005 
2006 	if (set) {
2007 		error = -EFAULT;
2008 		if (copy_from_user(&new_set, set, sizeof(*set)))
2009 			goto out;
2010 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2011 
2012 		error = sigprocmask(how, &new_set, &old_set);
2013 		if (error)
2014 			goto out;
2015 		if (oset)
2016 			goto set_old;
2017 	} else if (oset) {
2018 		spin_lock_irq(&current->sighand->siglock);
2019 		old_set = current->blocked;
2020 		spin_unlock_irq(&current->sighand->siglock);
2021 
2022 	set_old:
2023 		error = -EFAULT;
2024 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2025 			goto out;
2026 	}
2027 	error = 0;
2028 out:
2029 	return error;
2030 }
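
/*
 * Illustrative sketch (not part of this file): the user-space view of the
 * syscall above via the libc sigprocmask() wrapper.  SIGINT is blocked
 * around a critical section and the old mask restored afterwards; the
 * sigdelsetmask() call above is why attempts to block SIGKILL/SIGSTOP are
 * silently dropped.
 *
 *	#include <signal.h>
 *
 *	sigset_t set, oset;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &oset);	// block SIGINT
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &oset, NULL);	// restore previous mask
 */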
2031 
2032 long do_sigpending(void __user *set, unsigned long sigsetsize)
2033 {
2034 	long error = -EINVAL;
2035 	sigset_t pending;
2036 
2037 	if (sigsetsize > sizeof(sigset_t))
2038 		goto out;
2039 
2040 	spin_lock_irq(&current->sighand->siglock);
2041 	sigorsets(&pending, &current->pending.signal,
2042 		  &current->signal->shared_pending.signal);
2043 	spin_unlock_irq(&current->sighand->siglock);
2044 
2045 	/* Outside the lock because only this thread touches it.  */
2046 	sigandsets(&pending, &current->blocked, &pending);
2047 
2048 	error = -EFAULT;
2049 	if (!copy_to_user(set, &pending, sigsetsize))
2050 		error = 0;
2051 
2052 out:
2053 	return error;
2054 }
2055 
2056 asmlinkage long
2057 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2058 {
2059 	return do_sigpending(set, sigsetsize);
2060 }
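
/*
 * Illustrative sketch (not part of this file): do_sigpending() reports
 * signals that are pending but blocked, which is exactly what the POSIX
 * sigpending() wrapper exposes to user space.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t block, pend;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, NULL);
 *	raise(SIGUSR1);				// queued, not delivered
 *	sigpending(&pend);
 *	if (sigismember(&pend, SIGUSR1))
 *		printf("SIGUSR1 is pending\n");
 */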
2061 
2062 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2063 
2064 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2065 {
2066 	int err;
2067 
2068 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2069 		return -EFAULT;
2070 	if (from->si_code < 0)
2071 		return __copy_to_user(to, from, sizeof(siginfo_t))
2072 			? -EFAULT : 0;
2073 	/*
2074 	 * If you change siginfo_t structure, please be sure
2075 	 * this code is fixed accordingly.
2076 	 * Please remember to update the signalfd_copyinfo() function
2077 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2078 	 * It should never copy any pad contained in the structure
2079 	 * to avoid security leaks, but must copy the generic
2080 	 * 3 ints plus the relevant union member.
2081 	 */
2082 	err = __put_user(from->si_signo, &to->si_signo);
2083 	err |= __put_user(from->si_errno, &to->si_errno);
2084 	err |= __put_user((short)from->si_code, &to->si_code);
2085 	switch (from->si_code & __SI_MASK) {
2086 	case __SI_KILL:
2087 		err |= __put_user(from->si_pid, &to->si_pid);
2088 		err |= __put_user(from->si_uid, &to->si_uid);
2089 		break;
2090 	case __SI_TIMER:
2091 		err |= __put_user(from->si_tid, &to->si_tid);
2092 		err |= __put_user(from->si_overrun, &to->si_overrun);
2093 		err |= __put_user(from->si_ptr, &to->si_ptr);
2094 		break;
2095 	case __SI_POLL:
2096 		err |= __put_user(from->si_band, &to->si_band);
2097 		err |= __put_user(from->si_fd, &to->si_fd);
2098 		break;
2099 	case __SI_FAULT:
2100 		err |= __put_user(from->si_addr, &to->si_addr);
2101 #ifdef __ARCH_SI_TRAPNO
2102 		err |= __put_user(from->si_trapno, &to->si_trapno);
2103 #endif
2104 		break;
2105 	case __SI_CHLD:
2106 		err |= __put_user(from->si_pid, &to->si_pid);
2107 		err |= __put_user(from->si_uid, &to->si_uid);
2108 		err |= __put_user(from->si_status, &to->si_status);
2109 		err |= __put_user(from->si_utime, &to->si_utime);
2110 		err |= __put_user(from->si_stime, &to->si_stime);
2111 		break;
2112 	case __SI_RT: /* This is not generated by the kernel as of now. */
2113 	case __SI_MESGQ: /* But this is */
2114 		err |= __put_user(from->si_pid, &to->si_pid);
2115 		err |= __put_user(from->si_uid, &to->si_uid);
2116 		err |= __put_user(from->si_ptr, &to->si_ptr);
2117 		break;
2118 	default: /* this is just in case for now ... */
2119 		err |= __put_user(from->si_pid, &to->si_pid);
2120 		err |= __put_user(from->si_uid, &to->si_uid);
2121 		break;
2122 	}
2123 	return err;
2124 }
2125 
2126 #endif
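
/*
 * Illustrative sketch (not part of this file): the siginfo_t copied out
 * above is what an SA_SIGINFO handler receives; for a plain kill()
 * (__SI_KILL) the sender's PID and UID are the fields of interest.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t last_sender;
 *
 *	static void usr1_handler(int sig, siginfo_t *si, void *uctx)
 *	{
 *		last_sender = si->si_pid;	// async-signal-safe store
 *	}
 *
 *	struct sigaction sa = { .sa_sigaction = usr1_handler,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGUSR1, &sa, NULL);
 */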
2127 
2128 asmlinkage long
2129 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2130 		    siginfo_t __user *uinfo,
2131 		    const struct timespec __user *uts,
2132 		    size_t sigsetsize)
2133 {
2134 	int ret, sig;
2135 	sigset_t these;
2136 	struct timespec ts;
2137 	siginfo_t info;
2138 	long timeout = 0;
2139 
2140 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2141 	if (sigsetsize != sizeof(sigset_t))
2142 		return -EINVAL;
2143 
2144 	if (copy_from_user(&these, uthese, sizeof(these)))
2145 		return -EFAULT;
2146 
2147 	/*
2148 	 * Invert the set of allowed signals to get those we
2149 	 * want to block.
2150 	 */
2151 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2152 	signotset(&these);
2153 
2154 	if (uts) {
2155 		if (copy_from_user(&ts, uts, sizeof(ts)))
2156 			return -EFAULT;
2157 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2158 		    || ts.tv_sec < 0)
2159 			return -EINVAL;
2160 	}
2161 
2162 	spin_lock_irq(&current->sighand->siglock);
2163 	sig = dequeue_signal(current, &these, &info);
2164 	if (!sig) {
2165 		timeout = MAX_SCHEDULE_TIMEOUT;
2166 		if (uts)
2167 			timeout = (timespec_to_jiffies(&ts)
2168 				   + (ts.tv_sec || ts.tv_nsec));
2169 
2170 		if (timeout) {
2171 			/* None ready -- temporarily unblock those we're
2172 			 * interested in while we are sleeping so that we'll
2173 			 * be awakened when they arrive.  */
2174 			current->real_blocked = current->blocked;
2175 			sigandsets(&current->blocked, &current->blocked, &these);
2176 			recalc_sigpending();
2177 			spin_unlock_irq(&current->sighand->siglock);
2178 
2179 			timeout = schedule_timeout_interruptible(timeout);
2180 
2181 			spin_lock_irq(&current->sighand->siglock);
2182 			sig = dequeue_signal(current, &these, &info);
2183 			current->blocked = current->real_blocked;
2184 			siginitset(&current->real_blocked, 0);
2185 			recalc_sigpending();
2186 		}
2187 	}
2188 	spin_unlock_irq(&current->sighand->siglock);
2189 
2190 	if (sig) {
2191 		ret = sig;
2192 		if (uinfo) {
2193 			if (copy_siginfo_to_user(uinfo, &info))
2194 				ret = -EFAULT;
2195 		}
2196 	} else {
2197 		ret = -EAGAIN;
2198 		if (timeout)
2199 			ret = -EINTR;
2200 	}
2201 
2202 	return ret;
2203 }
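
/*
 * Illustrative sketch (not part of this file): user-space use of the
 * syscall above through the libc sigtimedwait() wrapper.  The signal must
 * be blocked first so it stays pending instead of being delivered to a
 * handler or the default action.
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	int sig;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *	sig = sigtimedwait(&set, &si, &ts);
 *	// sig == SIGUSR1 on success, -1 with errno == EAGAIN on timeout
 */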
2204 
2205 asmlinkage long
2206 sys_kill(pid_t pid, int sig)
2207 {
2208 	struct siginfo info;
2209 
2210 	info.si_signo = sig;
2211 	info.si_errno = 0;
2212 	info.si_code = SI_USER;
2213 	info.si_pid = task_tgid_vnr(current);
2214 	info.si_uid = current->uid;
2215 
2216 	return kill_something_info(sig, &info, pid);
2217 }
2218 
2219 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2220 {
2221 	int error;
2222 	struct siginfo info;
2223 	struct task_struct *p;
2224 	unsigned long flags;
2225 
2226 	error = -ESRCH;
2227 	info.si_signo = sig;
2228 	info.si_errno = 0;
2229 	info.si_code = SI_TKILL;
2230 	info.si_pid = task_tgid_vnr(current);
2231 	info.si_uid = current->uid;
2232 
2233 	rcu_read_lock();
2234 	p = find_task_by_vpid(pid);
2235 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2236 		error = check_kill_permission(sig, &info, p);
2237 		/*
2238 		 * The null signal is a permissions and process existence
2239 		 * probe.  No signal is actually delivered.
2240 		 *
2241 		 * If lock_task_sighand() fails we pretend the task dies
2242 		 * after receiving the signal. The window is tiny, and the
2243 		 * signal is private anyway.
2244 		 */
2245 		if (!error && sig && lock_task_sighand(p, &flags)) {
2246 			error = specific_send_sig_info(sig, &info, p);
2247 			unlock_task_sighand(p, &flags);
2248 		}
2249 	}
2250 	rcu_read_unlock();
2251 
2252 	return error;
2253 }
2254 
2255 /**
2256  *  sys_tgkill - send signal to one specific thread
2257  *  @tgid: the thread group ID of the thread
2258  *  @pid: the PID of the thread
2259  *  @sig: signal to be sent
2260  *
2261  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2262  *  exists but no longer belongs to the target process. This
2263  *  method solves the problem of threads exiting and PIDs getting reused.
2264  */
2265 asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2266 {
2267 	/* This is only valid for single tasks */
2268 	if (pid <= 0 || tgid <= 0)
2269 		return -EINVAL;
2270 
2271 	return do_tkill(tgid, pid, sig);
2272 }
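
/*
 * Illustrative sketch (not part of this file): libc of this era has no
 * tgkill() wrapper, so callers usually go through syscall().  tgid is the
 * process (thread group) id and tid is the kernel thread id obtained from
 * gettid() in the target thread.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int my_tgkill(pid_t tgid, pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */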
2273 
2274 /*
2275  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2276  */
2277 asmlinkage long
2278 sys_tkill(pid_t pid, int sig)
2279 {
2280 	/* This is only valid for single tasks */
2281 	if (pid <= 0)
2282 		return -EINVAL;
2283 
2284 	return do_tkill(0, pid, sig);
2285 }
2286 
2287 asmlinkage long
2288 sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
2289 {
2290 	siginfo_t info;
2291 
2292 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2293 		return -EFAULT;
2294 
2295 	/* Not even root can pretend to send signals from the kernel.
2296 	   Nor can they impersonate a kill(), which adds source info.  */
2297 	if (info.si_code >= 0)
2298 		return -EPERM;
2299 	info.si_signo = sig;
2300 
2301 	/* POSIX.1b doesn't mention process groups.  */
2302 	return kill_proc_info(sig, &info, pid);
2303 }
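
/*
 * Illustrative sketch (not part of this file): user space reaches this
 * syscall through sigqueue(), which attaches a value to the signal; the
 * si_code >= 0 check above is what stops callers from forging
 * kernel-generated si_codes.  target_pid is hypothetical.
 *
 *	#include <signal.h>
 *
 *	union sigval val = { .sival_int = 42 };
 *	sigqueue(target_pid, SIGUSR1, val);
 */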
2304 
2305 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2306 {
2307 	struct task_struct *t = current;
2308 	struct k_sigaction *k;
2309 	sigset_t mask;
2310 
2311 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2312 		return -EINVAL;
2313 
2314 	k = &t->sighand->action[sig-1];
2315 
2316 	spin_lock_irq(&current->sighand->siglock);
2317 	if (oact)
2318 		*oact = *k;
2319 
2320 	if (act) {
2321 		sigdelsetmask(&act->sa.sa_mask,
2322 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2323 		*k = *act;
2324 		/*
2325 		 * POSIX 3.3.1.3:
2326 		 *  "Setting a signal action to SIG_IGN for a signal that is
2327 		 *   pending shall cause the pending signal to be discarded,
2328 		 *   whether or not it is blocked."
2329 		 *
2330 		 *  "Setting a signal action to SIG_DFL for a signal that is
2331 		 *   pending and whose default action is to ignore the signal
2332 		 *   (for example, SIGCHLD), shall cause the pending signal to
2333 		 *   be discarded, whether or not it is blocked"
2334 		 */
2335 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2336 			sigemptyset(&mask);
2337 			sigaddset(&mask, sig);
2338 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2339 			do {
2340 				rm_from_queue_full(&mask, &t->pending);
2341 				t = next_thread(t);
2342 			} while (t != current);
2343 		}
2344 	}
2345 
2346 	spin_unlock_irq(&current->sighand->siglock);
2347 	return 0;
2348 }
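
/*
 * Illustrative sketch (not part of this file): the POSIX rule quoted above
 * is visible from user space -- a pending, blocked SIGUSR1 vanishes the
 * moment its action becomes SIG_IGN.
 *
 *	#include <signal.h>
 *	#include <assert.h>
 *
 *	sigset_t set, pend;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);				// now pending and blocked
 *
 *	signal(SIGUSR1, SIG_IGN);		// hits the rm_from_queue_full() path
 *	sigpending(&pend);
 *	assert(!sigismember(&pend, SIGUSR1));	// the pending signal was discarded
 */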
2349 
2350 int
2351 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2352 {
2353 	stack_t oss;
2354 	int error;
2355 
2356 	if (uoss) {
2357 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2358 		oss.ss_size = current->sas_ss_size;
2359 		oss.ss_flags = sas_ss_flags(sp);
2360 	}
2361 
2362 	if (uss) {
2363 		void __user *ss_sp;
2364 		size_t ss_size;
2365 		int ss_flags;
2366 
2367 		error = -EFAULT;
2368 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2369 		    || __get_user(ss_sp, &uss->ss_sp)
2370 		    || __get_user(ss_flags, &uss->ss_flags)
2371 		    || __get_user(ss_size, &uss->ss_size))
2372 			goto out;
2373 
2374 		error = -EPERM;
2375 		if (on_sig_stack(sp))
2376 			goto out;
2377 
2378 		error = -EINVAL;
2379 		/*
2380 		 *
2381 		 * Note - this code used to test ss_flags incorrectly:
2382 		 * old code may have been written using ss_flags==0
2383 		 * to mean ss_flags==SS_ONSTACK (as this was the only
2384 		 * way that worked) - this fix preserves that older
2385 		 * mechanism.
2386 		 */
2387 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2388 			goto out;
2389 
2390 		if (ss_flags == SS_DISABLE) {
2391 			ss_size = 0;
2392 			ss_sp = NULL;
2393 		} else {
2394 			error = -ENOMEM;
2395 			if (ss_size < MINSIGSTKSZ)
2396 				goto out;
2397 		}
2398 
2399 		current->sas_ss_sp = (unsigned long) ss_sp;
2400 		current->sas_ss_size = ss_size;
2401 	}
2402 
2403 	if (uoss) {
2404 		error = -EFAULT;
2405 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2406 			goto out;
2407 	}
2408 
2409 	error = 0;
2410 out:
2411 	return error;
2412 }
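
/*
 * Illustrative sketch (not part of this file): the usual user-space
 * pairing of sigaltstack() with an SA_ONSTACK handler so that a SIGSEGV
 * caused by stack overflow can still run a handler.  MINSIGSTKSZ is the
 * lower bound enforced by the -ENOMEM check above; segv_handler() is
 * hypothetical.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = { .sa_handler = segv_handler,
 *				.sa_flags = SA_ONSTACK };
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */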
2413 
2414 #ifdef __ARCH_WANT_SYS_SIGPENDING
2415 
2416 asmlinkage long
2417 sys_sigpending(old_sigset_t __user *set)
2418 {
2419 	return do_sigpending(set, sizeof(*set));
2420 }
2421 
2422 #endif
2423 
2424 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2425 /* Some platforms have their own version with special arguments; others
2426    support only sys_rt_sigprocmask.  */
2427 
2428 asmlinkage long
2429 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2430 {
2431 	int error;
2432 	old_sigset_t old_set, new_set;
2433 
2434 	if (set) {
2435 		error = -EFAULT;
2436 		if (copy_from_user(&new_set, set, sizeof(*set)))
2437 			goto out;
2438 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2439 
2440 		spin_lock_irq(&current->sighand->siglock);
2441 		old_set = current->blocked.sig[0];
2442 
2443 		error = 0;
2444 		switch (how) {
2445 		default:
2446 			error = -EINVAL;
2447 			break;
2448 		case SIG_BLOCK:
2449 			sigaddsetmask(&current->blocked, new_set);
2450 			break;
2451 		case SIG_UNBLOCK:
2452 			sigdelsetmask(&current->blocked, new_set);
2453 			break;
2454 		case SIG_SETMASK:
2455 			current->blocked.sig[0] = new_set;
2456 			break;
2457 		}
2458 
2459 		recalc_sigpending();
2460 		spin_unlock_irq(&current->sighand->siglock);
2461 		if (error)
2462 			goto out;
2463 		if (oset)
2464 			goto set_old;
2465 	} else if (oset) {
2466 		old_set = current->blocked.sig[0];
2467 	set_old:
2468 		error = -EFAULT;
2469 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2470 			goto out;
2471 	}
2472 	error = 0;
2473 out:
2474 	return error;
2475 }
2476 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2477 
2478 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2479 asmlinkage long
2480 sys_rt_sigaction(int sig,
2481 		 const struct sigaction __user *act,
2482 		 struct sigaction __user *oact,
2483 		 size_t sigsetsize)
2484 {
2485 	struct k_sigaction new_sa, old_sa;
2486 	int ret = -EINVAL;
2487 
2488 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2489 	if (sigsetsize != sizeof(sigset_t))
2490 		goto out;
2491 
2492 	if (act) {
2493 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2494 			return -EFAULT;
2495 	}
2496 
2497 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2498 
2499 	if (!ret && oact) {
2500 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2501 			return -EFAULT;
2502 	}
2503 out:
2504 	return ret;
2505 }
2506 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2507 
2508 #ifdef __ARCH_WANT_SYS_SGETMASK
2509 
2510 /*
2511  * For backwards compatibility.  Functionality superseded by sigprocmask.
2512  */
2513 asmlinkage long
2514 sys_sgetmask(void)
2515 {
2516 	/* SMP safe */
2517 	return current->blocked.sig[0];
2518 }
2519 
2520 asmlinkage long
2521 sys_ssetmask(int newmask)
2522 {
2523 	int old;
2524 
2525 	spin_lock_irq(&current->sighand->siglock);
2526 	old = current->blocked.sig[0];
2527 
2528 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2529 						  sigmask(SIGSTOP)));
2530 	recalc_sigpending();
2531 	spin_unlock_irq(&current->sighand->siglock);
2532 
2533 	return old;
2534 }
2535 #endif /* __ARCH_WANT_SYS_SGETMASK */
2536 
2537 #ifdef __ARCH_WANT_SYS_SIGNAL
2538 /*
2539  * For backwards compatibility.  Functionality superseded by sigaction.
2540  */
2541 asmlinkage unsigned long
2542 sys_signal(int sig, __sighandler_t handler)
2543 {
2544 	struct k_sigaction new_sa, old_sa;
2545 	int ret;
2546 
2547 	new_sa.sa.sa_handler = handler;
2548 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2549 	sigemptyset(&new_sa.sa.sa_mask);
2550 
2551 	ret = do_sigaction(sig, &new_sa, &old_sa);
2552 
2553 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2554 }
2555 #endif /* __ARCH_WANT_SYS_SIGNAL */
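
/*
 * Illustrative sketch (not part of this file): the SA_ONESHOT semantics
 * above are why code written against this legacy syscall re-installs its
 * handler on every delivery -- the kernel resets the action to SIG_DFL
 * after the first signal.  New code should use sigaction() instead.
 *
 *	static void on_hup(int sig)
 *	{
 *		signal(SIGHUP, on_hup);		// re-arm the one-shot handler
 *		// ... handle SIGHUP ...
 *	}
 *
 *	signal(SIGHUP, on_hup);
 */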
2556 
2557 #ifdef __ARCH_WANT_SYS_PAUSE
2558 
2559 asmlinkage long
2560 sys_pause(void)
2561 {
2562 	current->state = TASK_INTERRUPTIBLE;
2563 	schedule();
2564 	return -ERESTARTNOHAND;
2565 }
2566 
2567 #endif
2568 
2569 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2570 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2571 {
2572 	sigset_t newset;
2573 
2574 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2575 	if (sigsetsize != sizeof(sigset_t))
2576 		return -EINVAL;
2577 
2578 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2579 		return -EFAULT;
2580 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2581 
2582 	spin_lock_irq(&current->sighand->siglock);
2583 	current->saved_sigmask = current->blocked;
2584 	current->blocked = newset;
2585 	recalc_sigpending();
2586 	spin_unlock_irq(&current->sighand->siglock);
2587 
2588 	current->state = TASK_INTERRUPTIBLE;
2589 	schedule();
2590 	set_restore_sigmask();
2591 	return -ERESTARTNOHAND;
2592 }
2593 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
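
/*
 * Illustrative sketch (not part of this file): the classic user-space use
 * of sigsuspend(), which this syscall implements -- atomically swap in a
 * mask that unblocks SIGUSR1 and sleep until a handler has run, avoiding
 * the window a sigprocmask()/pause() pair would leave open.  got_usr1 is
 * a hypothetical flag set by the SIGUSR1 handler.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, wait_mask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &wait_mask);
 *
 *	while (!got_usr1)
 *		sigsuspend(&wait_mask);		// returns -1 with errno == EINTR
 */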
2594 
2595 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2596 {
2597 	return NULL;
2598 }
2599 
2600 void __init signals_init(void)
2601 {
2602 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2603 }
2604