/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <trace/sched.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	handler = sig_handler(t, sig);
	if (!sig_handler_ignored(handler, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig, handler);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
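
/*
 * Worked example (illustrative, not part of the original file): with
 * _NSIG_WORDS == 1, suppose SIGINT (bit 1) and SIGTERM (bit 14) are
 * pending and SIGINT is blocked:
 *
 *	signal->sig[0]  = (1UL << 1) | (1UL << 14);
 *	blocked->sig[0] = (1UL << 1);
 *	ready = signal->sig[0] & ~blocked->sig[0];	(== 1UL << 14)
 *
 * ready is nonzero, so the task still has a deliverable signal
 * (SIGTERM) even though SIGINT is masked.
 */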

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
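
/*
 * Illustrative sketch (not part of the original file): ffz(~x) returns
 * the index of the lowest set bit in x, so the lowest-numbered
 * deliverable signal wins.  E.g. if SIGHUP (bit 0) and SIGTERM
 * (bit 14) are both pending and unblocked in word 0:
 *
 *	x = (1UL << 0) | (1UL << 14);
 *	sig = ffz(~x) + 1;	(ffz(~x) == 0, so sig == 1, SIGHUP)
 */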

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}
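
/*
 * Note (added commentary, not in the original source): the accounting
 * above is optimistic -- user->sigpending is bumped before the limit
 * check, and rolled back only if the check or the allocation fails.
 * A racing sender may therefore be charged transiently, but the
 * RLIMIT_SIGPENDING cap is never durably exceeded.
 */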

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig, handler);
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
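
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * gating signal delivery through its notifier.  The names my_dev and
 * my_dev_notifier are hypothetical.
 */
#if 0
static int my_dev_notifier(void *priv)
{
	struct my_dev *dev = priv;	/* hypothetical private data */

	return dev->interruptible;	/* nonzero: let the signal through */
}

static void my_dev_wait(struct my_dev *dev)
{
	sigset_t mask;

	sigfillset(&mask);
	block_all_signals(my_dev_notifier, dev, &mask);
	/* ... sleep waiting for hardware ... */
	unblock_all_signals();
}
#endif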

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
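
/*
 * Usage sketch (illustrative, not part of the original file): the
 * common pattern for draining one signal in kernel-thread context --
 * take the siglock, dequeue, drop the lock.
 */
#if 0
	siginfo_t info;
	int sig;

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &current->blocked, &info);
	spin_unlock_irq(&current->sighand->siglock);
#endif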

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	uid_t uid, euid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	uid = current_uid();
	euid = current_euid();
	if ((euid ^ t->suid) && (euid ^ t->uid) &&
	    (uid  ^ t->suid) && (uid  ^ t->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from finish_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
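
/*
 * Worked example (illustrative, not part of the original file): if
 * SIGCHLD (< SIGRTMIN) is already in the pending set, legacy_queue()
 * is true and a second send is silently coalesced -- send_signal()
 * below returns 0 without queueing another sigqueue.  Signals at or
 * above SIGRTMIN never take this shortcut, so every rt signal sent is
 * queued (and delivered) individually.
 */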

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	struct sigpending *pending;
	struct sigqueue *q;

	trace_sched_signal_send(sig, t);

	assert_spin_locked(&t->sighand->siglock);
	if (!prepare_signal(sig, t))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_pid_vnr(current);
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the signal
			 * was rt and sent by user using something other
			 * than kill().
			 */
			return -EAGAIN;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);

struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
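
/*
 * Usage sketch (illustrative, not part of the original file): the
 * lock/unlock pair pins ->sighand against release, so the caller may
 * dereference it only while the lock is held.
 */
#if 0
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		/* p->sighand is stable and ->siglock is held here */
		unlock_task_sighand(p, &flags);
	}
#endif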

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
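
/*
 * Usage sketch (illustrative, not part of the original file): sending
 * a signal to a process group from kernel code, roughly as a tty
 * driver might on hangup.  The pgrp pointer is assumed to be pinned
 * by the caller.
 */
#if 0
	kill_pgrp(pgrp, SIGHUP, 1);	/* priv=1: si_code == SI_KERNEL */
	kill_pid(pid, SIGTERM, 1);
#endif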

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
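
/*
 * Lifecycle sketch (illustrative, not part of the original file): how
 * the posix-timers code is expected to use a preallocated sigqueue --
 * allocate once at timer creation, re-send on each expiry, free at
 * timer deletion.  The target pointer and signal choice are assumed;
 * error handling is elided.
 */
#if 0
	struct sigqueue *q;

	q = sigqueue_alloc();			/* at timer_create() time */
	q->info.si_signo = SIGALRM;		/* assumed notification signal */
	q->info.si_code = SI_TIMER;
	/* ... on each expiry ... */
	send_sigqueue(q, target, 1);		/* group-wide delivery */
	/* ... at timer_delete() time ... */
	sigqueue_free(q);
#endif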

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	struct task_cputime cputime;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * The only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	thread_group_cputime(tsk, &cputime);
	info.si_utime = cputime_to_jiffies(cputime.utime);
	info.si_stime = cputime_to_jiffies(cputime.stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * See the comment in do_notify_parent() about the following 3 lines.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation and pointless because our tracer is
	 * dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has changed.
	 * If the debugger wanted something specific in the siginfo
	 * structure then it should have updated *info via
	 * PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = current->parent->uid;
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;
		spin_unlock_irq(&sighand->siglock);

		if (unlikely(!tracehook_notify_jctl(1, why)))
			goto relock;

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
1777 		signr = tracehook_get_signal(current, regs, info, return_ka);
1778 		if (unlikely(signr < 0))
1779 			goto relock;
1780 		if (unlikely(signr != 0))
1781 			ka = return_ka;
1782 		else {
1783 			signr = dequeue_signal(current, &current->blocked,
1784 					       info);
1785 
1786 			if (!signr)
1787 				break; /* will return 0 */
1788 
1789 			if (signr != SIGKILL) {
1790 				signr = ptrace_signal(signr, info,
1791 						      regs, cookie);
1792 				if (!signr)
1793 					continue;
1794 			}
1795 
1796 			ka = &sighand->action[signr-1];
1797 		}
1798 
1799 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1800 			continue;
1801 		if (ka->sa.sa_handler != SIG_DFL) {
1802 			/* Run the handler.  */
1803 			*return_ka = *ka;
1804 
1805 			if (ka->sa.sa_flags & SA_ONESHOT)
1806 				ka->sa.sa_handler = SIG_DFL;
1807 
1808 			break; /* will return non-zero "signr" value */
1809 		}
1810 
1811 		/*
1812 		 * Now we are doing the default action for this signal.
1813 		 */
1814 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1815 			continue;
1816 
1817 		/*
1818 		 * Global init gets no signals it doesn't want.
1819 		 */
1820 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1821 		    !signal_group_exit(signal))
1822 			continue;
1823 
1824 		if (sig_kernel_stop(signr)) {
1825 			/*
1826 			 * The default action is to stop all threads in
1827 			 * the thread group.  The job control signals
1828 			 * do nothing in an orphaned pgrp, but SIGSTOP
1829 			 * always works.  Note that siglock needs to be
1830 			 * dropped during the call to is_orphaned_pgrp()
1831 			 * because of lock ordering with tasklist_lock.
1832 			 * This allows an intervening SIGCONT to be posted.
1833 			 * We need to check for that and bail out if necessary.
1834 			 */
1835 			if (signr != SIGSTOP) {
1836 				spin_unlock_irq(&sighand->siglock);
1837 
1838 				/* signals can be posted during this window */
1839 
1840 				if (is_current_pgrp_orphaned())
1841 					goto relock;
1842 
1843 				spin_lock_irq(&sighand->siglock);
1844 			}
1845 
1846 			if (likely(do_signal_stop(info->si_signo))) {
1847 				/* It released the siglock.  */
1848 				goto relock;
1849 			}
1850 
1851 			/*
1852 			 * We didn't actually stop, due to a race
1853 			 * with SIGCONT or something like that.
1854 			 */
1855 			continue;
1856 		}
1857 
1858 		spin_unlock_irq(&sighand->siglock);
1859 
1860 		/*
1861 		 * Anything else is fatal, maybe with a core dump.
1862 		 */
1863 		current->flags |= PF_SIGNALED;
1864 
1865 		if (sig_kernel_coredump(signr)) {
1866 			if (print_fatal_signals)
1867 				print_fatal_signal(regs, info->si_signo);
1868 			/*
1869 			 * If it was able to dump core, this kills all
1870 			 * other threads in the group and synchronizes with
1871 			 * their demise.  If we lost the race with another
1872 			 * thread getting here, it set group_exit_code
1873 			 * first and our do_group_exit call below will use
1874 			 * that value and ignore the one we pass it.
1875 			 */
1876 			do_coredump(info->si_signo, info->si_signo, regs);
1877 		}
1878 
1879 		/*
1880 		 * Death signals, no core dump.
1881 		 */
1882 		do_group_exit(info->si_signo);
1883 		/* NOTREACHED */
1884 	}
1885 	spin_unlock_irq(&sighand->siglock);
1886 	return signr;
1887 }
1888 
1889 void exit_signals(struct task_struct *tsk)
1890 {
1891 	int group_stop = 0;
1892 	struct task_struct *t;
1893 
1894 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1895 		tsk->flags |= PF_EXITING;
1896 		return;
1897 	}
1898 
1899 	spin_lock_irq(&tsk->sighand->siglock);
1900 	/*
1901 	 * From now on, this task is not visible to group-wide signals;
1902 	 * see wants_signal() and do_signal_stop().
1903 	 */
1904 	tsk->flags |= PF_EXITING;
1905 	if (!signal_pending(tsk))
1906 		goto out;
1907 
1908 	/* It could be that __group_complete_signal() chose us to
1909 	 * notify about a group-wide signal. Another thread should be
1910 	 * woken now to take the signal, since we will not.
1911 	 */
1912 	for (t = tsk; (t = next_thread(t)) != tsk; )
1913 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
1914 			recalc_sigpending_and_wake(t);
1915 
1916 	if (unlikely(tsk->signal->group_stop_count) &&
1917 			!--tsk->signal->group_stop_count) {
1918 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
1919 		group_stop = 1;
1920 	}
1921 out:
1922 	spin_unlock_irq(&tsk->sighand->siglock);
1923 
1924 	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1925 		read_lock(&tasklist_lock);
1926 		do_notify_parent_cldstop(tsk, CLD_STOPPED);
1927 		read_unlock(&tasklist_lock);
1928 	}
1929 }
1930 
1931 EXPORT_SYMBOL(recalc_sigpending);
1932 EXPORT_SYMBOL_GPL(dequeue_signal);
1933 EXPORT_SYMBOL(flush_signals);
1934 EXPORT_SYMBOL(force_sig);
1935 EXPORT_SYMBOL(send_sig);
1936 EXPORT_SYMBOL(send_sig_info);
1937 EXPORT_SYMBOL(sigprocmask);
1938 EXPORT_SYMBOL(block_all_signals);
1939 EXPORT_SYMBOL(unblock_all_signals);
1940 
1941 
1942 /*
1943  * System call entry points.
1944  */
1945 
1946 asmlinkage long sys_restart_syscall(void)
1947 {
1948 	struct restart_block *restart = &current_thread_info()->restart_block;
1949 	return restart->fn(restart);
1950 }
1951 
1952 long do_no_restart_syscall(struct restart_block *param)
1953 {
1954 	return -EINTR;
1955 }
1956 
1957 /*
1958  * We don't need to get the kernel lock - this is all local to this
1959  * particular thread. (And that's good, because this is _heavily_
1960  * used by various programs.)
1961  */
1962 
1963 /*
1964  * This is also useful for kernel threads that want to temporarily
1965  * (or permanently) block certain signals.
1966  *
1967  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1968  * interface happily blocks "unblockable" signals like SIGKILL
1969  * and friends.
1970  */
1971 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1972 {
1973 	int error;
1974 
1975 	spin_lock_irq(&current->sighand->siglock);
1976 	if (oldset)
1977 		*oldset = current->blocked;
1978 
1979 	error = 0;
1980 	switch (how) {
1981 	case SIG_BLOCK:
1982 		sigorsets(&current->blocked, &current->blocked, set);
1983 		break;
1984 	case SIG_UNBLOCK:
1985 		signandsets(&current->blocked, &current->blocked, set);
1986 		break;
1987 	case SIG_SETMASK:
1988 		current->blocked = *set;
1989 		break;
1990 	default:
1991 		error = -EINVAL;
1992 	}
1993 	recalc_sigpending();
1994 	spin_unlock_irq(&current->sighand->siglock);
1995 
1996 	return error;
1997 }
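
/*
 * Illustrative sketch (not part of this file): a kernel thread that
 * wants to ignore everything except SIGKILL could use the in-kernel
 * interface above like this:
 *
 *	sigset_t blocked;
 *
 *	siginitsetinv(&blocked, sigmask(SIGKILL));
 *	sigprocmask(SIG_SETMASK, &blocked, NULL);
 *
 * As noted above, unlike the user-mode interface this would just as
 * happily block SIGKILL itself if asked to.
 */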
1998 
1999 asmlinkage long
2000 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2001 {
2002 	int error = -EINVAL;
2003 	sigset_t old_set, new_set;
2004 
2005 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2006 	if (sigsetsize != sizeof(sigset_t))
2007 		goto out;
2008 
2009 	if (set) {
2010 		error = -EFAULT;
2011 		if (copy_from_user(&new_set, set, sizeof(*set)))
2012 			goto out;
2013 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2014 
2015 		error = sigprocmask(how, &new_set, &old_set);
2016 		if (error)
2017 			goto out;
2018 		if (oset)
2019 			goto set_old;
2020 	} else if (oset) {
2021 		spin_lock_irq(&current->sighand->siglock);
2022 		old_set = current->blocked;
2023 		spin_unlock_irq(&current->sighand->siglock);
2024 
2025 	set_old:
2026 		error = -EFAULT;
2027 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2028 			goto out;
2029 	}
2030 	error = 0;
2031 out:
2032 	return error;
2033 }
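
/*
 * User space reaches sys_rt_sigprocmask() through the sigprocmask(2)
 * and pthread_sigmask(3) wrappers.  A minimal, illustrative caller
 * that blocks SIGINT (sketch, not kernel code):
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *
 * Attempts to block SIGKILL/SIGSTOP are silently dropped by the
 * sigdelsetmask() call above rather than reported as errors.
 */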
2034 
2035 long do_sigpending(void __user *set, unsigned long sigsetsize)
2036 {
2037 	long error = -EINVAL;
2038 	sigset_t pending;
2039 
2040 	if (sigsetsize > sizeof(sigset_t))
2041 		goto out;
2042 
2043 	spin_lock_irq(&current->sighand->siglock);
2044 	sigorsets(&pending, &current->pending.signal,
2045 		  &current->signal->shared_pending.signal);
2046 	spin_unlock_irq(&current->sighand->siglock);
2047 
2048 	/* Outside the lock because only this thread touches it.  */
2049 	sigandsets(&pending, &current->blocked, &pending);
2050 
2051 	error = -EFAULT;
2052 	if (!copy_to_user(set, &pending, sigsetsize))
2053 		error = 0;
2054 
2055 out:
2056 	return error;
2057 }
2058 
2059 asmlinkage long
2060 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2061 {
2062 	return do_sigpending(set, sigsetsize);
2063 }
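
/*
 * Illustrative user-space use via the sigpending(2) wrapper: with
 * SIGUSR1 blocked and one instance raised, the returned set reports
 * it as pending (sketch, not kernel code):
 *
 *	sigset_t pending;
 *
 *	raise(SIGUSR1);		(assumes SIGUSR1 is already blocked)
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGUSR1))
 *		...
 */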
2064 
2065 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2066 
2067 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2068 {
2069 	int err;
2070 
2071 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2072 		return -EFAULT;
2073 	if (from->si_code < 0)
2074 		return __copy_to_user(to, from, sizeof(siginfo_t))
2075 			? -EFAULT : 0;
2076 	/*
2077 	 * If you change siginfo_t structure, please be sure
2078 	 * this code is fixed accordingly.
2079 	 * Please remember to update the signalfd_copyinfo() function
2080 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2081 	 * It should never copy any pad contained in the structure
2082 	 * to avoid security leaks, but must copy the generic
2083 	 * 3 ints plus the relevant union member.
2084 	 */
2085 	err = __put_user(from->si_signo, &to->si_signo);
2086 	err |= __put_user(from->si_errno, &to->si_errno);
2087 	err |= __put_user((short)from->si_code, &to->si_code);
2088 	switch (from->si_code & __SI_MASK) {
2089 	case __SI_KILL:
2090 		err |= __put_user(from->si_pid, &to->si_pid);
2091 		err |= __put_user(from->si_uid, &to->si_uid);
2092 		break;
2093 	case __SI_TIMER:
2094 		err |= __put_user(from->si_tid, &to->si_tid);
2095 		err |= __put_user(from->si_overrun, &to->si_overrun);
2096 		err |= __put_user(from->si_ptr, &to->si_ptr);
2097 		break;
2098 	case __SI_POLL:
2099 		err |= __put_user(from->si_band, &to->si_band);
2100 		err |= __put_user(from->si_fd, &to->si_fd);
2101 		break;
2102 	case __SI_FAULT:
2103 		err |= __put_user(from->si_addr, &to->si_addr);
2104 #ifdef __ARCH_SI_TRAPNO
2105 		err |= __put_user(from->si_trapno, &to->si_trapno);
2106 #endif
2107 		break;
2108 	case __SI_CHLD:
2109 		err |= __put_user(from->si_pid, &to->si_pid);
2110 		err |= __put_user(from->si_uid, &to->si_uid);
2111 		err |= __put_user(from->si_status, &to->si_status);
2112 		err |= __put_user(from->si_utime, &to->si_utime);
2113 		err |= __put_user(from->si_stime, &to->si_stime);
2114 		break;
2115 	case __SI_RT: /* This is not generated by the kernel as of now. */
2116 	case __SI_MESGQ: /* But this one is. */
2117 		err |= __put_user(from->si_pid, &to->si_pid);
2118 		err |= __put_user(from->si_uid, &to->si_uid);
2119 		err |= __put_user(from->si_ptr, &to->si_ptr);
2120 		break;
2121 	default: /* this is just in case for now ... */
2122 		err |= __put_user(from->si_pid, &to->si_pid);
2123 		err |= __put_user(from->si_uid, &to->si_uid);
2124 		break;
2125 	}
2126 	return err;
2127 }
2128 
2129 #endif
2130 
2131 asmlinkage long
2132 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2133 		    siginfo_t __user *uinfo,
2134 		    const struct timespec __user *uts,
2135 		    size_t sigsetsize)
2136 {
2137 	int ret, sig;
2138 	sigset_t these;
2139 	struct timespec ts;
2140 	siginfo_t info;
2141 	long timeout = 0;
2142 
2143 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2144 	if (sigsetsize != sizeof(sigset_t))
2145 		return -EINVAL;
2146 
2147 	if (copy_from_user(&these, uthese, sizeof(these)))
2148 		return -EFAULT;
2149 
2150 	/*
2151 	 * Invert the set of allowed signals to get those we
2152 	 * want to block.
2153 	 */
2154 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2155 	signotset(&these);
2156 
2157 	if (uts) {
2158 		if (copy_from_user(&ts, uts, sizeof(ts)))
2159 			return -EFAULT;
2160 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2161 		    || ts.tv_sec < 0)
2162 			return -EINVAL;
2163 	}
2164 
2165 	spin_lock_irq(&current->sighand->siglock);
2166 	sig = dequeue_signal(current, &these, &info);
2167 	if (!sig) {
2168 		timeout = MAX_SCHEDULE_TIMEOUT;
2169 		if (uts)
2170 			timeout = (timespec_to_jiffies(&ts)
2171 				   + (ts.tv_sec || ts.tv_nsec));
2172 
2173 		if (timeout) {
2174 			/* None ready -- temporarily unblock those we're
2175 			 * interested in while we are sleeping, so that we'll
2176 			 * be awakened when they arrive.  */
2177 			current->real_blocked = current->blocked;
2178 			sigandsets(&current->blocked, &current->blocked, &these);
2179 			recalc_sigpending();
2180 			spin_unlock_irq(&current->sighand->siglock);
2181 
2182 			timeout = schedule_timeout_interruptible(timeout);
2183 
2184 			spin_lock_irq(&current->sighand->siglock);
2185 			sig = dequeue_signal(current, &these, &info);
2186 			current->blocked = current->real_blocked;
2187 			siginitset(&current->real_blocked, 0);
2188 			recalc_sigpending();
2189 		}
2190 	}
2191 	spin_unlock_irq(&current->sighand->siglock);
2192 
2193 	if (sig) {
2194 		ret = sig;
2195 		if (uinfo) {
2196 			if (copy_siginfo_to_user(uinfo, &info))
2197 				ret = -EFAULT;
2198 		}
2199 	} else {
2200 		ret = -EAGAIN;
2201 		if (timeout)
2202 			ret = -EINTR;
2203 	}
2204 
2205 	return ret;
2206 }
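
/*
 * Typical user-space use via the sigtimedwait(2) wrapper; the caller
 * blocks the signals first so they stay pending instead of being
 * delivered (illustrative sketch, not kernel code):
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { 5, 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) < 0 && errno == EAGAIN)
 *		... timed out ...
 */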
2207 
2208 asmlinkage long
2209 sys_kill(pid_t pid, int sig)
2210 {
2211 	struct siginfo info;
2212 
2213 	info.si_signo = sig;
2214 	info.si_errno = 0;
2215 	info.si_code = SI_USER;
2216 	info.si_pid = task_tgid_vnr(current);
2217 	info.si_uid = current_uid();
2218 
2219 	return kill_something_info(sig, &info, pid);
2220 }
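
/*
 * For reference, the pid argument is interpreted by
 * kill_something_info() with the usual kill(2) encodings:
 *
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal
 *	pid < -1	signal every process in process group -pid
 */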
2221 
2222 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2223 {
2224 	int error;
2225 	struct siginfo info;
2226 	struct task_struct *p;
2227 	unsigned long flags;
2228 
2229 	error = -ESRCH;
2230 	info.si_signo = sig;
2231 	info.si_errno = 0;
2232 	info.si_code = SI_TKILL;
2233 	info.si_pid = task_tgid_vnr(current);
2234 	info.si_uid = current_uid();
2235 
2236 	rcu_read_lock();
2237 	p = find_task_by_vpid(pid);
2238 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2239 		error = check_kill_permission(sig, &info, p);
2240 		/*
2241 		 * The null signal is a permissions and process existence
2242 		 * probe.  No signal is actually delivered.
2243 		 *
2244 		 * If lock_task_sighand() fails we pretend the task dies
2245 		 * after receiving the signal. The window is tiny, and the
2246 		 * signal is private anyway.
2247 		 */
2248 		if (!error && sig && lock_task_sighand(p, &flags)) {
2249 			error = specific_send_sig_info(sig, &info, p);
2250 			unlock_task_sighand(p, &flags);
2251 		}
2252 	}
2253 	rcu_read_unlock();
2254 
2255 	return error;
2256 }
2257 
2258 /**
2259  *  sys_tgkill - send signal to one specific thread
2260  *  @tgid: the thread group ID of the thread
2261  *  @pid: the PID of the thread
2262  *  @sig: signal to be sent
2263  *
2264  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2265  *  exists but no longer belongs to the target thread group. This
2266  *  method solves the problem of threads exiting and PIDs getting reused.
2267  */
2268 asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2269 {
2270 	/* This is only valid for single tasks */
2271 	if (pid <= 0 || tgid <= 0)
2272 		return -EINVAL;
2273 
2274 	return do_tkill(tgid, pid, sig);
2275 }
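
/*
 * Illustrative user-space use (older libcs expose this only through
 * syscall(2); the hypothetical tid here would come from gettid() in
 * the target thread):
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */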
2276 
2277 /*
2278  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2279  */
2280 asmlinkage long
2281 sys_tkill(pid_t pid, int sig)
2282 {
2283 	/* This is only valid for single tasks */
2284 	if (pid <= 0)
2285 		return -EINVAL;
2286 
2287 	return do_tkill(0, pid, sig);
2288 }
2289 
2290 asmlinkage long
2291 sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
2292 {
2293 	siginfo_t info;
2294 
2295 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2296 		return -EFAULT;
2297 
2298 	/* Not even root can pretend to send signals from the kernel.
2299 	   Nor can they impersonate a kill(), which adds source info.  */
2300 	if (info.si_code >= 0)
2301 		return -EPERM;
2302 	info.si_signo = sig;
2303 
2304 	/* POSIX.1b doesn't mention process groups.  */
2305 	return kill_proc_info(sig, &info, pid);
2306 }
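
/*
 * User space normally reaches this through sigqueue(3), which builds
 * a siginfo with si_code = SI_QUEUE (negative, so it passes the check
 * above) and carries a value payload (illustrative sketch):
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, v);
 */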
2307 
2308 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2309 {
2310 	struct task_struct *t = current;
2311 	struct k_sigaction *k;
2312 	sigset_t mask;
2313 
2314 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2315 		return -EINVAL;
2316 
2317 	k = &t->sighand->action[sig-1];
2318 
2319 	spin_lock_irq(&current->sighand->siglock);
2320 	if (oact)
2321 		*oact = *k;
2322 
2323 	if (act) {
2324 		sigdelsetmask(&act->sa.sa_mask,
2325 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2326 		*k = *act;
2327 		/*
2328 		 * POSIX 3.3.1.3:
2329 		 *  "Setting a signal action to SIG_IGN for a signal that is
2330 		 *   pending shall cause the pending signal to be discarded,
2331 		 *   whether or not it is blocked."
2332 		 *
2333 		 *  "Setting a signal action to SIG_DFL for a signal that is
2334 		 *   pending and whose default action is to ignore the signal
2335 		 *   (for example, SIGCHLD), shall cause the pending signal to
2336 		 *   be discarded, whether or not it is blocked"
2337 		 */
2338 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2339 			sigemptyset(&mask);
2340 			sigaddset(&mask, sig);
2341 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2342 			do {
2343 				rm_from_queue_full(&mask, &t->pending);
2344 				t = next_thread(t);
2345 			} while (t != current);
2346 		}
2347 	}
2348 
2349 	spin_unlock_irq(&current->sighand->siglock);
2350 	return 0;
2351 }
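
/*
 * The POSIX discard rule above is visible from user space: with a
 * SIGCHLD pending and blocked, installing SIG_IGN drops it from the
 * pending set (illustrative sketch via the sigaction(2) wrapper):
 *
 *	struct sigaction sa = { .sa_handler = SIG_IGN };
 *
 *	sigaction(SIGCHLD, &sa, NULL);
 *
 * after which sigpending() no longer reports the queued SIGCHLD.
 */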
2352 
2353 int
2354 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2355 {
2356 	stack_t oss;
2357 	int error;
2358 
2359 	if (uoss) {
2360 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2361 		oss.ss_size = current->sas_ss_size;
2362 		oss.ss_flags = sas_ss_flags(sp);
2363 	}
2364 
2365 	if (uss) {
2366 		void __user *ss_sp;
2367 		size_t ss_size;
2368 		int ss_flags;
2369 
2370 		error = -EFAULT;
2371 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2372 		    || __get_user(ss_sp, &uss->ss_sp)
2373 		    || __get_user(ss_flags, &uss->ss_flags)
2374 		    || __get_user(ss_size, &uss->ss_size))
2375 			goto out;
2376 
2377 		error = -EPERM;
2378 		if (on_sig_stack(sp))
2379 			goto out;
2380 
2381 		error = -EINVAL;
2382 		/*
2383 		 * Note: this code used to test ss_flags incorrectly.
2384 		 * Old code may have been written using ss_flags == 0
2385 		 * to mean ss_flags == SS_ONSTACK (as this was the only
2386 		 * way that worked), so this check preserves that
2387 		 * older mechanism. Only SS_DISABLE, SS_ONSTACK and 0
2388 		 * are accepted here.
2389 		 */
2390 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2391 			goto out;
2392 
2393 		if (ss_flags == SS_DISABLE) {
2394 			ss_size = 0;
2395 			ss_sp = NULL;
2396 		} else {
2397 			error = -ENOMEM;
2398 			if (ss_size < MINSIGSTKSZ)
2399 				goto out;
2400 		}
2401 
2402 		current->sas_ss_sp = (unsigned long) ss_sp;
2403 		current->sas_ss_size = ss_size;
2404 	}
2405 
2406 	if (uoss) {
2407 		error = -EFAULT;
2408 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2409 			goto out;
2410 	}
2411 
2412 	error = 0;
2413 out:
2414 	return error;
2415 }
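
/*
 * Illustrative user-space setup of an alternate signal stack via the
 * sigaltstack(2) wrapper; a handler installed with SA_ONSTACK then
 * runs on this stack (sketch, not kernel code):
 *
 *	stack_t ss;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 */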
2416 
2417 #ifdef __ARCH_WANT_SYS_SIGPENDING
2418 
2419 asmlinkage long
2420 sys_sigpending(old_sigset_t __user *set)
2421 {
2422 	return do_sigpending(set, sizeof(*set));
2423 }
2424 
2425 #endif
2426 
2427 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2428 /* Some platforms have their own version with special arguments; others
2429    support only sys_rt_sigprocmask.  */
2430 
2431 asmlinkage long
2432 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2433 {
2434 	int error;
2435 	old_sigset_t old_set, new_set;
2436 
2437 	if (set) {
2438 		error = -EFAULT;
2439 		if (copy_from_user(&new_set, set, sizeof(*set)))
2440 			goto out;
2441 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2442 
2443 		spin_lock_irq(&current->sighand->siglock);
2444 		old_set = current->blocked.sig[0];
2445 
2446 		error = 0;
2447 		switch (how) {
2448 		default:
2449 			error = -EINVAL;
2450 			break;
2451 		case SIG_BLOCK:
2452 			sigaddsetmask(&current->blocked, new_set);
2453 			break;
2454 		case SIG_UNBLOCK:
2455 			sigdelsetmask(&current->blocked, new_set);
2456 			break;
2457 		case SIG_SETMASK:
2458 			current->blocked.sig[0] = new_set;
2459 			break;
2460 		}
2461 
2462 		recalc_sigpending();
2463 		spin_unlock_irq(&current->sighand->siglock);
2464 		if (error)
2465 			goto out;
2466 		if (oset)
2467 			goto set_old;
2468 	} else if (oset) {
2469 		old_set = current->blocked.sig[0];
2470 	set_old:
2471 		error = -EFAULT;
2472 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2473 			goto out;
2474 	}
2475 	error = 0;
2476 out:
2477 	return error;
2478 }
2479 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2480 
2481 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2482 asmlinkage long
2483 sys_rt_sigaction(int sig,
2484 		 const struct sigaction __user *act,
2485 		 struct sigaction __user *oact,
2486 		 size_t sigsetsize)
2487 {
2488 	struct k_sigaction new_sa, old_sa;
2489 	int ret = -EINVAL;
2490 
2491 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2492 	if (sigsetsize != sizeof(sigset_t))
2493 		goto out;
2494 
2495 	if (act) {
2496 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2497 			return -EFAULT;
2498 	}
2499 
2500 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2501 
2502 	if (!ret && oact) {
2503 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2504 			return -EFAULT;
2505 	}
2506 out:
2507 	return ret;
2508 }
2509 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2510 
2511 #ifdef __ARCH_WANT_SYS_SGETMASK
2512 
2513 /*
2514  * For backwards compatibility.  Functionality superseded by sigprocmask.
2515  */
2516 asmlinkage long
2517 sys_sgetmask(void)
2518 {
2519 	/* SMP safe */
2520 	return current->blocked.sig[0];
2521 }
2522 
2523 asmlinkage long
2524 sys_ssetmask(int newmask)
2525 {
2526 	int old;
2527 
2528 	spin_lock_irq(&current->sighand->siglock);
2529 	old = current->blocked.sig[0];
2530 
2531 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2532 						  sigmask(SIGSTOP)));
2533 	recalc_sigpending();
2534 	spin_unlock_irq(&current->sighand->siglock);
2535 
2536 	return old;
2537 }
2538 #endif /* __ARCH_WANT_SYS_SGETMASK */
2539 
2540 #ifdef __ARCH_WANT_SYS_SIGNAL
2541 /*
2542  * For backwards compatibility.  Functionality superseded by sigaction.
2543  */
2544 asmlinkage unsigned long
2545 sys_signal(int sig, __sighandler_t handler)
2546 {
2547 	struct k_sigaction new_sa, old_sa;
2548 	int ret;
2549 
2550 	new_sa.sa.sa_handler = handler;
2551 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2552 	sigemptyset(&new_sa.sa.sa_mask);
2553 
2554 	ret = do_sigaction(sig, &new_sa, &old_sa);
2555 
2556 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2557 }
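
/*
 * SA_ONESHOT | SA_NOMASK gives the historical System V semantics:
 * the handler is reset to SIG_DFL on delivery and the signal is not
 * blocked while the handler runs, so portable code re-installs the
 * handler itself, e.g. (illustrative):
 *
 *	void handler(int sig)
 *	{
 *		signal(sig, handler);
 *		...
 *	}
 */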
2558 #endif /* __ARCH_WANT_SYS_SIGNAL */
2559 
2560 #ifdef __ARCH_WANT_SYS_PAUSE
2561 
2562 asmlinkage long
2563 sys_pause(void)
2564 {
2565 	current->state = TASK_INTERRUPTIBLE;
2566 	schedule();
2567 	return -ERESTARTNOHAND;
2568 }
2569 
2570 #endif
2571 
2572 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2573 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2574 {
2575 	sigset_t newset;
2576 
2577 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2578 	if (sigsetsize != sizeof(sigset_t))
2579 		return -EINVAL;
2580 
2581 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2582 		return -EFAULT;
2583 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2584 
2585 	spin_lock_irq(&current->sighand->siglock);
2586 	current->saved_sigmask = current->blocked;
2587 	current->blocked = newset;
2588 	recalc_sigpending();
2589 	spin_unlock_irq(&current->sighand->siglock);
2590 
2591 	current->state = TASK_INTERRUPTIBLE;
2592 	schedule();
2593 	set_restore_sigmask();
2594 	return -ERESTARTNOHAND;
2595 }
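
/*
 * rt_sigsuspend() exists to close the race between unblocking a
 * signal and calling pause().  Classic user-space pattern via the
 * sigsuspend(2) wrapper (illustrative sketch):
 *
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	while (!flag_set_by_handler)
 *		sigsuspend(&old);
 *
 * where flag_set_by_handler is a volatile sig_atomic_t set by the
 * signal handler.
 */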
2596 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2597 
2598 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2599 {
2600 	return NULL;
2601 }
2602 
2603 void __init signals_init(void)
2604 {
2605 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2606 }
2607