1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/ratelimit.h>
26 #include <linux/tracehook.h>
27 #include <linux/capability.h>
28 #include <linux/freezer.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/nsproxy.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/signal.h>
33 
34 #include <asm/param.h>
35 #include <asm/uaccess.h>
36 #include <asm/unistd.h>
37 #include <asm/siginfo.h>
38 #include "audit.h"	/* audit_signal_info() */
39 
40 /*
41  * SLAB caches for signal bits.
42  */
43 
44 static struct kmem_cache *sigqueue_cachep;
45 
46 int print_fatal_signals __read_mostly;
47 
48 static void __user *sig_handler(struct task_struct *t, int sig)
49 {
50 	return t->sighand->action[sig - 1].sa.sa_handler;
51 }
52 
53 static int sig_handler_ignored(void __user *handler, int sig)
54 {
55 	/* Is it explicitly or implicitly ignored? */
56 	return handler == SIG_IGN ||
57 		(handler == SIG_DFL && sig_kernel_ignore(sig));
58 }
59 
60 static int sig_task_ignored(struct task_struct *t, int sig,
61 		int from_ancestor_ns)
62 {
63 	void __user *handler;
64 
65 	handler = sig_handler(t, sig);
66 
67 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
68 			handler == SIG_DFL && !from_ancestor_ns)
69 		return 1;
70 
71 	return sig_handler_ignored(handler, sig);
72 }
73 
74 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
75 {
76 	/*
77 	 * Blocked signals are never ignored, since the
78 	 * signal handler may change by the time it is
79 	 * unblocked.
80 	 */
81 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
82 		return 0;
83 
84 	if (!sig_task_ignored(t, sig, from_ancestor_ns))
85 		return 0;
86 
87 	/*
88 	 * Tracers may want to know about even ignored signals.
89 	 */
90 	return !tracehook_consider_ignored_signal(t, sig);
91 }
92 
93 /*
94  * Re-calculate pending state from the set of locally pending
95  * signals, globally pending signals, and blocked signals.
96  */
97 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98 {
99 	unsigned long ready;
100 	long i;
101 
102 	switch (_NSIG_WORDS) {
103 	default:
104 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105 			ready |= signal->sig[i] &~ blocked->sig[i];
106 		break;
107 
108 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
109 		ready |= signal->sig[2] &~ blocked->sig[2];
110 		ready |= signal->sig[1] &~ blocked->sig[1];
111 		ready |= signal->sig[0] &~ blocked->sig[0];
112 		break;
113 
114 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
115 		ready |= signal->sig[0] &~ blocked->sig[0];
116 		break;
117 
118 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
119 	}
120 	return ready != 0;
121 }
122 
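/*
 * Worked example: with a single signal word, if signal->sig[0] == 0x5
 * (signals 1 and 3 pending) and blocked->sig[0] == 0x1 (signal 1
 * blocked), then ready == 0x4 and has_pending_signals() returns 1,
 * because signal 3 is still deliverable.
 */
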
123 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124 
125 static int recalc_sigpending_tsk(struct task_struct *t)
126 {
127 	if (t->signal->group_stop_count > 0 ||
128 	    PENDING(&t->pending, &t->blocked) ||
129 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
130 		set_tsk_thread_flag(t, TIF_SIGPENDING);
131 		return 1;
132 	}
133 	/*
134 	 * We must never clear the flag in another thread, or in current
135 	 * when it's possible the current syscall is returning -ERESTART*.
136 	 * So we don't clear it here; only callers who know it is safe do so.
137 	 */
138 	return 0;
139 }
140 
141 /*
142  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143  * This is superfluous when called on current; the wakeup is a harmless no-op.
144  */
145 void recalc_sigpending_and_wake(struct task_struct *t)
146 {
147 	if (recalc_sigpending_tsk(t))
148 		signal_wake_up(t, 0);
149 }
150 
151 void recalc_sigpending(void)
152 {
153 	if (unlikely(tracehook_force_sigpending()))
154 		set_thread_flag(TIF_SIGPENDING);
155 	else if (!recalc_sigpending_tsk(current) && !freezing(current))
156 		clear_thread_flag(TIF_SIGPENDING);
157 
158 }
159 
160 /* Given the mask, find the first available signal that should be serviced. */
161 
162 #define SYNCHRONOUS_MASK \
163 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164 	 sigmask(SIGTRAP) | sigmask(SIGFPE))
165 
166 int next_signal(struct sigpending *pending, sigset_t *mask)
167 {
168 	unsigned long i, *s, *m, x;
169 	int sig = 0;
170 
171 	s = pending->signal.sig;
172 	m = mask->sig;
173 
174 	/*
175 	 * Handle the first word specially: it contains the
176 	 * synchronous signals that need to be dequeued first.
177 	 */
178 	x = *s &~ *m;
179 	if (x) {
180 		if (x & SYNCHRONOUS_MASK)
181 			x &= SYNCHRONOUS_MASK;
182 		sig = ffz(~x) + 1;
183 		return sig;
184 	}
185 
186 	switch (_NSIG_WORDS) {
187 	default:
188 		for (i = 1; i < _NSIG_WORDS; ++i) {
189 			x = *++s &~ *++m;
190 			if (!x)
191 				continue;
192 			sig = ffz(~x) + i*_NSIG_BPW + 1;
193 			break;
194 		}
195 		break;
196 
197 	case 2:
198 		x = s[1] &~ m[1];
199 		if (!x)
200 			break;
201 		sig = ffz(~x) + _NSIG_BPW + 1;
202 		break;
203 
204 	case 1:
205 		/* Nothing to do */
206 		break;
207 	}
208 
209 	return sig;
210 }
211 
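/*
 * Illustrative sketch only (example_peek_signal is hypothetical): the
 * calling convention for next_signal().  The caller must already hold
 * ->siglock.  Synchronous signals in the first word (see
 * SYNCHRONOUS_MASK) are preferred; otherwise the lowest pending
 * unblocked signal number is returned, or 0 if none is pending.
 */
static int example_peek_signal(struct task_struct *tsk)
{
	return next_signal(&tsk->pending, &tsk->blocked);
}
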
212 static inline void print_dropped_signal(int sig)
213 {
214 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
215 
216 	if (!print_fatal_signals)
217 		return;
218 
219 	if (!__ratelimit(&ratelimit_state))
220 		return;
221 
222 	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
223 				current->comm, current->pid, sig);
224 }
225 
226 /*
227  * allocate a new signal queue record
228  * - this may be called without locks if and only if t == current, otherwise an
229  *   appropriate lock must be held to stop the target task from exiting
230  */
231 static struct sigqueue *
232 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
233 {
234 	struct sigqueue *q = NULL;
235 	struct user_struct *user;
236 
237 	/*
238 	 * Protect access to @t credentials. This can go away when all
239 	 * callers hold rcu read lock.
240 	 */
241 	rcu_read_lock();
242 	user = get_uid(__task_cred(t)->user);
243 	atomic_inc(&user->sigpending);
244 	rcu_read_unlock();
245 
246 	if (override_rlimit ||
247 	    atomic_read(&user->sigpending) <=
248 			task_rlimit(t, RLIMIT_SIGPENDING)) {
249 		q = kmem_cache_alloc(sigqueue_cachep, flags);
250 	} else {
251 		print_dropped_signal(sig);
252 	}
253 
254 	if (unlikely(q == NULL)) {
255 		atomic_dec(&user->sigpending);
256 		free_uid(user);
257 	} else {
258 		INIT_LIST_HEAD(&q->list);
259 		q->flags = 0;
260 		q->user = user;
261 	}
262 
263 	return q;
264 }
265 
266 static void __sigqueue_free(struct sigqueue *q)
267 {
268 	if (q->flags & SIGQUEUE_PREALLOC)
269 		return;
270 	atomic_dec(&q->user->sigpending);
271 	free_uid(q->user);
272 	kmem_cache_free(sigqueue_cachep, q);
273 }
274 
275 void flush_sigqueue(struct sigpending *queue)
276 {
277 	struct sigqueue *q;
278 
279 	sigemptyset(&queue->signal);
280 	while (!list_empty(&queue->list)) {
281 		q = list_entry(queue->list.next, struct sigqueue, list);
282 		list_del_init(&q->list);
283 		__sigqueue_free(q);
284 	}
285 }
286 
287 /*
288  * Flush all pending signals for a task.
289  */
290 void __flush_signals(struct task_struct *t)
291 {
292 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
293 	flush_sigqueue(&t->pending);
294 	flush_sigqueue(&t->signal->shared_pending);
295 }
296 
297 void flush_signals(struct task_struct *t)
298 {
299 	unsigned long flags;
300 
301 	spin_lock_irqsave(&t->sighand->siglock, flags);
302 	__flush_signals(t);
303 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
304 }
305 
306 static void __flush_itimer_signals(struct sigpending *pending)
307 {
308 	sigset_t signal, retain;
309 	struct sigqueue *q, *n;
310 
311 	signal = pending->signal;
312 	sigemptyset(&retain);
313 
314 	list_for_each_entry_safe(q, n, &pending->list, list) {
315 		int sig = q->info.si_signo;
316 
317 		if (likely(q->info.si_code != SI_TIMER)) {
318 			sigaddset(&retain, sig);
319 		} else {
320 			sigdelset(&signal, sig);
321 			list_del_init(&q->list);
322 			__sigqueue_free(q);
323 		}
324 	}
325 
326 	sigorsets(&pending->signal, &signal, &retain);
327 }
328 
329 void flush_itimer_signals(void)
330 {
331 	struct task_struct *tsk = current;
332 	unsigned long flags;
333 
334 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
335 	__flush_itimer_signals(&tsk->pending);
336 	__flush_itimer_signals(&tsk->signal->shared_pending);
337 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
338 }
339 
340 void ignore_signals(struct task_struct *t)
341 {
342 	int i;
343 
344 	for (i = 0; i < _NSIG; ++i)
345 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
346 
347 	flush_signals(t);
348 }
349 
350 /*
351  * Flush all handlers for a task.
352  */
353 
354 void
355 flush_signal_handlers(struct task_struct *t, int force_default)
356 {
357 	int i;
358 	struct k_sigaction *ka = &t->sighand->action[0];
359 	for (i = _NSIG ; i != 0 ; i--) {
360 		if (force_default || ka->sa.sa_handler != SIG_IGN)
361 			ka->sa.sa_handler = SIG_DFL;
362 		ka->sa.sa_flags = 0;
363 		sigemptyset(&ka->sa.sa_mask);
364 		ka++;
365 	}
366 }
367 
368 int unhandled_signal(struct task_struct *tsk, int sig)
369 {
370 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
371 	if (is_global_init(tsk))
372 		return 1;
373 	if (handler != SIG_IGN && handler != SIG_DFL)
374 		return 0;
375 	return !tracehook_consider_fatal_signal(tsk, sig);
376 }
377 
378 
379 /* Notify the system that a driver wants to block all signals for this
380  * process, and wants to be notified if any signals at all were to be
381  * sent/acted upon.  If the notifier routine returns non-zero, then the
382  * signal will be acted upon after all.  If the notifier routine returns 0,
383  * then the signal will be blocked.  Only one block per process is
384  * allowed.  priv is a pointer to private data that the notifier routine
385  * can use to determine if the signal should be blocked or not.  */
386 
387 void
388 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
389 {
390 	unsigned long flags;
391 
392 	spin_lock_irqsave(&current->sighand->siglock, flags);
393 	current->notifier_mask = mask;
394 	current->notifier_data = priv;
395 	current->notifier = notifier;
396 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
397 }
398 
399 /* Notify the system that blocking has ended. */
400 
401 void
402 unblock_all_signals(void)
403 {
404 	unsigned long flags;
405 
406 	spin_lock_irqsave(&current->sighand->siglock, flags);
407 	current->notifier = NULL;
408 	current->notifier_data = NULL;
409 	recalc_sigpending();
410 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
411 }
412 
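/*
 * Illustrative sketch only: how a driver might use the notifier API
 * above.  example_notifier() and example_critical_section() are
 * hypothetical; block_all_signals()/unblock_all_signals() are the
 * functions defined in this file.
 */
static int example_notifier(void *priv)
{
	/* Return 0 to block the signal, non-zero to let it through. */
	return 0;
}

static void example_critical_section(sigset_t *mask)
{
	block_all_signals(example_notifier, NULL, mask);
	/* ... signals in *mask are now filtered through the notifier ... */
	unblock_all_signals();
}
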
413 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
414 {
415 	struct sigqueue *q, *first = NULL;
416 
417 	/*
418 	 * Collect the siginfo appropriate to this signal.  Check if
419 	 * there is another siginfo for the same signal.
420 	 */
421 	list_for_each_entry(q, &list->list, list) {
422 		if (q->info.si_signo == sig) {
423 			if (first)
424 				goto still_pending;
425 			first = q;
426 		}
427 	}
428 
429 	sigdelset(&list->signal, sig);
430 
431 	if (first) {
432 still_pending:
433 		list_del_init(&first->list);
434 		copy_siginfo(info, &first->info);
435 		__sigqueue_free(first);
436 	} else {
437 		/* Ok, it wasn't in the queue.  This must be
438 		   a fast-pathed signal or we must have been
439 		   out of queue space.  So zero out the info.
440 		 */
441 		info->si_signo = sig;
442 		info->si_errno = 0;
443 		info->si_code = SI_USER;
444 		info->si_pid = 0;
445 		info->si_uid = 0;
446 	}
447 }
448 
449 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
450 			siginfo_t *info)
451 {
452 	int sig = next_signal(pending, mask);
453 
454 	if (sig) {
455 		if (current->notifier) {
456 			if (sigismember(current->notifier_mask, sig)) {
457 				if (!(current->notifier)(current->notifier_data)) {
458 					clear_thread_flag(TIF_SIGPENDING);
459 					return 0;
460 				}
461 			}
462 		}
463 
464 		collect_signal(sig, pending, info);
465 	}
466 
467 	return sig;
468 }
469 
470 /*
471  * Dequeue a signal and return the element to the caller, which is
472  * expected to free it.
473  *
474  * All callers have to hold the siglock.
475  */
476 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
477 {
478 	int signr;
479 
480 	/* We only dequeue private signals from ourselves; we don't let
481 	 * signalfd steal them.
482 	 */
483 	signr = __dequeue_signal(&tsk->pending, mask, info);
484 	if (!signr) {
485 		signr = __dequeue_signal(&tsk->signal->shared_pending,
486 					 mask, info);
487 		/*
488 		 * itimer signal?
489 		 *
490 		 * itimers are process shared and we restart periodic
491 		 * itimers in the signal delivery path to prevent DoS
492 		 * attacks in the high resolution timer case. This is
493 		 * compliant with the old way of self restarting
494 		 * itimers, as SIGALRM is a legacy signal and is only
495 		 * queued once. Changing the restart behaviour so that
496 		 * the timer is restarted in the signal dequeue path
497 		 * also reduces timer noise on heavily loaded !highres
498 		 * systems.
499 		 */
500 		if (unlikely(signr == SIGALRM)) {
501 			struct hrtimer *tmr = &tsk->signal->real_timer;
502 
503 			if (!hrtimer_is_queued(tmr) &&
504 			    tsk->signal->it_real_incr.tv64 != 0) {
505 				hrtimer_forward(tmr, tmr->base->get_time(),
506 						tsk->signal->it_real_incr);
507 				hrtimer_restart(tmr);
508 			}
509 		}
510 	}
511 
512 	recalc_sigpending();
513 	if (!signr)
514 		return 0;
515 
516 	if (unlikely(sig_kernel_stop(signr))) {
517 		/*
518 		 * Set a marker that we have dequeued a stop signal.  Our
519 		 * caller might release the siglock and then the pending
520 		 * stop signal it is about to process is no longer in the
521 		 * pending bitmasks, but must still be cleared by a SIGCONT
522 		 * (and overruled by a SIGKILL).  So those cases clear this
523 		 * shared flag after we've set it.  Note that this flag may
524 		 * remain set after the signal we return is ignored or
525 		 * handled.  That doesn't matter because its only purpose
526 		 * is to alert stop-signal processing code when another
527 		 * processor has come along and cleared the flag.
528 		 */
529 		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
530 	}
531 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
532 		/*
533 		 * Release the siglock to ensure proper locking order
534 		 * of timer locks outside of siglocks.  Note, we leave
535 		 * irqs disabled here, since the posix-timers code is
536 		 * about to disable them again anyway.
537 		 */
538 		spin_unlock(&tsk->sighand->siglock);
539 		do_schedule_next_timer(info);
540 		spin_lock(&tsk->sighand->siglock);
541 	}
542 	return signr;
543 }
544 
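/*
 * Illustrative sketch only (example_take_one_signal is hypothetical):
 * the canonical calling pattern for dequeue_signal().  All callers
 * must hold ->siglock, as get_signal_to_deliver() below does.
 */
static int example_take_one_signal(siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;	/* 0 if nothing was pending and unblocked */
}
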
545 /*
546  * Tell a process that it has a new active signal.
547  *
548  * NOTE! We rely on the previous spin_lock to
549  * lock interrupts for us! We can only be called with
550  * "siglock" held, and local interrupts must
551  * have been disabled when that was acquired!
552  *
553  * No need to set need_resched since signal event passing
554  * goes through ->blocked
555  */
556 void signal_wake_up(struct task_struct *t, int resume)
557 {
558 	unsigned int mask;
559 
560 	set_tsk_thread_flag(t, TIF_SIGPENDING);
561 
562 	/*
563 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
564 	 * case. We don't check t->state here because there is a race with it
565  * executing on another processor and just now entering stopped state.
566 	 * By using wake_up_state, we ensure the process will wake up and
567 	 * handle its death signal.
568 	 */
569 	mask = TASK_INTERRUPTIBLE;
570 	if (resume)
571 		mask |= TASK_WAKEKILL;
572 	if (!wake_up_state(t, mask))
573 		kick_process(t);
574 }
575 
576 /*
577  * Remove signals in mask from the pending set and queue.
578  * Returns 1 if any signals were found.
579  *
580  * All callers must be holding the siglock.
581  *
582  * This version takes a sigset mask and looks at all signals,
583  * not just those in the first mask word.
584  */
585 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
586 {
587 	struct sigqueue *q, *n;
588 	sigset_t m;
589 
590 	sigandsets(&m, mask, &s->signal);
591 	if (sigisemptyset(&m))
592 		return 0;
593 
594 	signandsets(&s->signal, &s->signal, mask);
595 	list_for_each_entry_safe(q, n, &s->list, list) {
596 		if (sigismember(mask, q->info.si_signo)) {
597 			list_del_init(&q->list);
598 			__sigqueue_free(q);
599 		}
600 	}
601 	return 1;
602 }
603 /*
604  * Remove signals in mask from the pending set and queue.
605  * Returns 1 if any signals were found.
606  *
607  * All callers must be holding the siglock.
608  */
609 static int rm_from_queue(unsigned long mask, struct sigpending *s)
610 {
611 	struct sigqueue *q, *n;
612 
613 	if (!sigtestsetmask(&s->signal, mask))
614 		return 0;
615 
616 	sigdelsetmask(&s->signal, mask);
617 	list_for_each_entry_safe(q, n, &s->list, list) {
618 		if (q->info.si_signo < SIGRTMIN &&
619 		    (mask & sigmask(q->info.si_signo))) {
620 			list_del_init(&q->list);
621 			__sigqueue_free(q);
622 		}
623 	}
624 	return 1;
625 }
626 
627 static inline int is_si_special(const struct siginfo *info)
628 {
629 	return info <= SEND_SIG_FORCED;
630 }
631 
632 static inline bool si_fromuser(const struct siginfo *info)
633 {
634 	return info == SEND_SIG_NOINFO ||
635 		(!is_si_special(info) && SI_FROMUSER(info));
636 }
637 
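/*
 * Note: is_si_special() relies on SEND_SIG_NOINFO, SEND_SIG_PRIV and
 * SEND_SIG_FORCED being the tiny "pointer" constants 0, 1 and 2, so a
 * single <= comparison separates them from real siginfo pointers.
 */
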
638 /*
639  * Bad permissions for sending the signal
640  * - the caller must hold the RCU read lock
641  */
642 static int check_kill_permission(int sig, struct siginfo *info,
643 				 struct task_struct *t)
644 {
645 	const struct cred *cred, *tcred;
646 	struct pid *sid;
647 	int error;
648 
649 	if (!valid_signal(sig))
650 		return -EINVAL;
651 
652 	if (!si_fromuser(info))
653 		return 0;
654 
655 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
656 	if (error)
657 		return error;
658 
659 	cred = current_cred();
660 	tcred = __task_cred(t);
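	/*
	 * a ^ b is nonzero iff a != b, so the four tests below succeed
	 * exactly when none of the caller's uid/euid match the target's
	 * uid/suid.
	 */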
661 	if (!same_thread_group(current, t) &&
662 	    (cred->euid ^ tcred->suid) &&
663 	    (cred->euid ^ tcred->uid) &&
664 	    (cred->uid  ^ tcred->suid) &&
665 	    (cred->uid  ^ tcred->uid) &&
666 	    !capable(CAP_KILL)) {
667 		switch (sig) {
668 		case SIGCONT:
669 			sid = task_session(t);
670 			/*
671 			 * We don't return the error if sid == NULL. The
672 			 * task was unhashed; the caller must notice this.
673 			 */
674 			if (!sid || sid == task_session(current))
675 				break;
676 		default:
677 			return -EPERM;
678 		}
679 	}
680 
681 	return security_task_kill(t, info, sig, 0);
682 }
683 
684 /*
685  * Handle magic process-wide effects of stop/continue signals. Unlike
686  * the signal actions, these happen immediately at signal-generation
687  * time regardless of blocking, ignoring, or handling.  This does the
688  * actual continuing for SIGCONT, but not the actual stopping for stop
689  * signals. The process stop is done as a signal action for SIG_DFL.
690  *
691  * Returns true if the signal should be actually delivered, otherwise
692  * it should be dropped.
693  */
694 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
695 {
696 	struct signal_struct *signal = p->signal;
697 	struct task_struct *t;
698 
699 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
700 		/*
701 		 * The process is in the middle of dying, nothing to do.
702 		 */
703 	} else if (sig_kernel_stop(sig)) {
704 		/*
705 		 * This is a stop signal.  Remove SIGCONT from all queues.
706 		 */
707 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
708 		t = p;
709 		do {
710 			rm_from_queue(sigmask(SIGCONT), &t->pending);
711 		} while_each_thread(p, t);
712 	} else if (sig == SIGCONT) {
713 		unsigned int why;
714 		/*
715 		 * Remove all stop signals from all queues,
716 		 * and wake all threads.
717 		 */
718 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
719 		t = p;
720 		do {
721 			unsigned int state;
722 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
723 			/*
724 			 * If there is a handler for SIGCONT, we must make
725 			 * sure that no thread returns to user mode before
726 			 * we post the signal, in case it was the only
727 			 * thread eligible to run the signal handler--then
728 			 * it must not do anything between resuming and
729 			 * running the handler.  With the TIF_SIGPENDING
730 			 * flag set, the thread will pause and acquire the
731 			 * siglock that we hold now and until we've queued
732 			 * the pending signal.
733 			 *
734 			 * Wake up the stopped thread _after_ setting
735 			 * TIF_SIGPENDING
736 			 */
737 			state = __TASK_STOPPED;
738 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
739 				set_tsk_thread_flag(t, TIF_SIGPENDING);
740 				state |= TASK_INTERRUPTIBLE;
741 			}
742 			wake_up_state(t, state);
743 		} while_each_thread(p, t);
744 
745 		/*
746 		 * Notify the parent with CLD_CONTINUED if we were stopped.
747 		 *
748 		 * If we were in the middle of a group stop, we pretend it
749 		 * was already finished, and then continued. Since SIGCHLD
750 		 * doesn't queue we report only CLD_STOPPED, as if the next
751 		 * CLD_CONTINUED was dropped.
752 		 */
753 		why = 0;
754 		if (signal->flags & SIGNAL_STOP_STOPPED)
755 			why |= SIGNAL_CLD_CONTINUED;
756 		else if (signal->group_stop_count)
757 			why |= SIGNAL_CLD_STOPPED;
758 
759 		if (why) {
760 			/*
761 			 * The first thread which returns from do_signal_stop()
762 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
763 			 * notify its parent. See get_signal_to_deliver().
764 			 */
765 			signal->flags = why | SIGNAL_STOP_CONTINUED;
766 			signal->group_stop_count = 0;
767 			signal->group_exit_code = 0;
768 		} else {
769 			/*
770 			 * We are not stopped, but there could be a stop
771 			 * signal in the middle of being processed after
772 			 * being removed from the queue.  Clear that too.
773 			 */
774 			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
775 		}
776 	}
777 
778 	return !sig_ignored(p, sig, from_ancestor_ns);
779 }
780 
781 /*
782  * Test if P wants to take SIG.  After we've checked all threads with this,
783  * it's equivalent to finding no threads not blocking SIG.  Any threads not
784  * blocking SIG were ruled out because they are not running and already
785  * have pending signals.  Such threads will dequeue from the shared queue
786  * as soon as they're available, so putting the signal on the shared queue
787  * will be equivalent to sending it to one such thread.
788  */
789 static inline int wants_signal(int sig, struct task_struct *p)
790 {
791 	if (sigismember(&p->blocked, sig))
792 		return 0;
793 	if (p->flags & PF_EXITING)
794 		return 0;
795 	if (sig == SIGKILL)
796 		return 1;
797 	if (task_is_stopped_or_traced(p))
798 		return 0;
799 	return task_curr(p) || !signal_pending(p);
800 }
801 
802 static void complete_signal(int sig, struct task_struct *p, int group)
803 {
804 	struct signal_struct *signal = p->signal;
805 	struct task_struct *t;
806 
807 	/*
808 	 * Now find a thread we can wake up to take the signal off the queue.
809 	 *
810 	 * If the main thread wants the signal, it gets first crack.
811 	 * Probably the least surprising to the average bear.
812 	 */
813 	if (wants_signal(sig, p))
814 		t = p;
815 	else if (!group || thread_group_empty(p))
816 		/*
817 		 * There is just one thread and it does not need to be woken.
818 		 * It will dequeue unblocked signals before it runs again.
819 		 */
820 		return;
821 	else {
822 		/*
823 		 * Otherwise try to find a suitable thread.
824 		 */
825 		t = signal->curr_target;
826 		while (!wants_signal(sig, t)) {
827 			t = next_thread(t);
828 			if (t == signal->curr_target)
829 				/*
830 				 * No thread needs to be woken.
831 				 * Any eligible threads will see
832 				 * the signal in the queue soon.
833 				 */
834 				return;
835 		}
836 		signal->curr_target = t;
837 	}
838 
839 	/*
840 	 * Found a killable thread.  If the signal will be fatal,
841 	 * then start taking the whole group down immediately.
842 	 */
843 	if (sig_fatal(p, sig) &&
844 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
845 	    !sigismember(&t->real_blocked, sig) &&
846 	    (sig == SIGKILL ||
847 	     !tracehook_consider_fatal_signal(t, sig))) {
848 		/*
849 		 * This signal will be fatal to the whole group.
850 		 */
851 		if (!sig_kernel_coredump(sig)) {
852 			/*
853 			 * Start a group exit and wake everybody up.
854 			 * This way we don't have other threads
855 			 * running and doing things after a slower
856 			 * thread has the fatal signal pending.
857 			 */
858 			signal->flags = SIGNAL_GROUP_EXIT;
859 			signal->group_exit_code = sig;
860 			signal->group_stop_count = 0;
861 			t = p;
862 			do {
863 				sigaddset(&t->pending.signal, SIGKILL);
864 				signal_wake_up(t, 1);
865 			} while_each_thread(p, t);
866 			return;
867 		}
868 	}
869 
870 	/*
871 	 * The signal is already in the shared-pending queue.
872 	 * Tell the chosen thread to wake up and dequeue it.
873 	 */
874 	signal_wake_up(t, sig == SIGKILL);
875 	return;
876 }
877 
878 static inline int legacy_queue(struct sigpending *signals, int sig)
879 {
880 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
881 }
882 
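/*
 * Example: a second SIGCHLD generated while one is already pending is
 * silently coalesced by legacy_queue(), whereas rt signals
 * (sig >= SIGRTMIN) are always queued individually.
 */
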
883 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
884 			int group, int from_ancestor_ns)
885 {
886 	struct sigpending *pending;
887 	struct sigqueue *q;
888 	int override_rlimit;
889 
890 	trace_signal_generate(sig, info, t);
891 
892 	assert_spin_locked(&t->sighand->siglock);
893 
894 	if (!prepare_signal(sig, t, from_ancestor_ns))
895 		return 0;
896 
897 	pending = group ? &t->signal->shared_pending : &t->pending;
898 	/*
899 	 * Short-circuit ignored signals and support queuing
900 	 * exactly one non-rt signal, so that we can get more
901 	 * detailed information about the cause of the signal.
902 	 */
903 	if (legacy_queue(pending, sig))
904 		return 0;
905 	/*
906 	 * fast-pathed signals for kernel-internal things like SIGSTOP
907 	 * or SIGKILL.
908 	 */
909 	if (info == SEND_SIG_FORCED)
910 		goto out_set;
911 
912 	/* Real-time signals must be queued if sent by sigqueue, or
913 	   some other real-time mechanism.  It is implementation
914 	   defined whether kill() does so.  We attempt to do so, on
915 	   the principle of least surprise, but since kill is not
916 	   allowed to fail with EAGAIN when low on memory we just
917 	   make sure at least one signal gets delivered and don't
918 	   pass on the info struct.  */
919 
920 	if (sig < SIGRTMIN)
921 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
922 	else
923 		override_rlimit = 0;
924 
925 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
926 		override_rlimit);
927 	if (q) {
928 		list_add_tail(&q->list, &pending->list);
929 		switch ((unsigned long) info) {
930 		case (unsigned long) SEND_SIG_NOINFO:
931 			q->info.si_signo = sig;
932 			q->info.si_errno = 0;
933 			q->info.si_code = SI_USER;
934 			q->info.si_pid = task_tgid_nr_ns(current,
935 							task_active_pid_ns(t));
936 			q->info.si_uid = current_uid();
937 			break;
938 		case (unsigned long) SEND_SIG_PRIV:
939 			q->info.si_signo = sig;
940 			q->info.si_errno = 0;
941 			q->info.si_code = SI_KERNEL;
942 			q->info.si_pid = 0;
943 			q->info.si_uid = 0;
944 			break;
945 		default:
946 			copy_siginfo(&q->info, info);
947 			if (from_ancestor_ns)
948 				q->info.si_pid = 0;
949 			break;
950 		}
951 	} else if (!is_si_special(info)) {
952 		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
953 			/*
954 			 * Queue overflow, abort.  We may abort if the
955 			 * signal was rt and sent by user using something
956 			 * other than kill().
957 			 */
958 			trace_signal_overflow_fail(sig, group, info);
959 			return -EAGAIN;
960 		} else {
961 			/*
962 			 * This is a silent loss of information.  We still
963 			 * send the signal, but the *info bits are lost.
964 			 */
965 			trace_signal_lose_info(sig, group, info);
966 		}
967 	}
968 
969 out_set:
970 	signalfd_notify(t, sig);
971 	sigaddset(&pending->signal, sig);
972 	complete_signal(sig, t, group);
973 	return 0;
974 }
975 
976 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
977 			int group)
978 {
979 	int from_ancestor_ns = 0;
980 
981 #ifdef CONFIG_PID_NS
982 	from_ancestor_ns = si_fromuser(info) &&
983 			   !task_pid_nr_ns(current, task_active_pid_ns(t));
984 #endif
985 
986 	return __send_signal(sig, info, t, group, from_ancestor_ns);
987 }
988 
989 static void print_fatal_signal(struct pt_regs *regs, int signr)
990 {
991 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
992 		current->comm, task_pid_nr(current), signr);
993 
994 #if defined(__i386__) && !defined(__arch_um__)
995 	printk("code at %08lx: ", regs->ip);
996 	{
997 		int i;
998 		for (i = 0; i < 16; i++) {
999 			unsigned char insn;
1000 
1001 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1002 				break;
1003 			printk("%02x ", insn);
1004 		}
1005 	}
1006 #endif
1007 	printk("\n");
1008 	preempt_disable();
1009 	show_regs(regs);
1010 	preempt_enable();
1011 }
1012 
1013 static int __init setup_print_fatal_signals(char *str)
1014 {
1015 	get_option(&str, &print_fatal_signals);
1016 
1017 	return 1;
1018 }
1019 
1020 __setup("print-fatal-signals=", setup_print_fatal_signals);
1021 
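/*
 * Example: booting with "print-fatal-signals=1" on the kernel command
 * line enables the diagnostics above (and the ratelimited
 * print_dropped_signal() messages) for the whole system.
 */
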
1022 int
1023 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1024 {
1025 	return send_signal(sig, info, p, 1);
1026 }
1027 
1028 static int
1029 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1030 {
1031 	return send_signal(sig, info, t, 0);
1032 }
1033 
1034 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1035 			bool group)
1036 {
1037 	unsigned long flags;
1038 	int ret = -ESRCH;
1039 
1040 	if (lock_task_sighand(p, &flags)) {
1041 		ret = send_signal(sig, info, p, group);
1042 		unlock_task_sighand(p, &flags);
1043 	}
1044 
1045 	return ret;
1046 }
1047 
1048 /*
1049  * Force a signal that the process can't ignore: if necessary
1050  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1051  *
1052  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1053  * since we do not want to have a signal handler that was blocked
1054  * be invoked when user space had explicitly blocked it.
1055  *
1056  * We don't want to have recursive SIGSEGV's etc, for example,
1057  * that is why we also clear SIGNAL_UNKILLABLE.
1058  */
1059 int
1060 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1061 {
1062 	unsigned long int flags;
1063 	int ret, blocked, ignored;
1064 	struct k_sigaction *action;
1065 
1066 	spin_lock_irqsave(&t->sighand->siglock, flags);
1067 	action = &t->sighand->action[sig-1];
1068 	ignored = action->sa.sa_handler == SIG_IGN;
1069 	blocked = sigismember(&t->blocked, sig);
1070 	if (blocked || ignored) {
1071 		action->sa.sa_handler = SIG_DFL;
1072 		if (blocked) {
1073 			sigdelset(&t->blocked, sig);
1074 			recalc_sigpending_and_wake(t);
1075 		}
1076 	}
1077 	if (action->sa.sa_handler == SIG_DFL)
1078 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1079 	ret = specific_send_sig_info(sig, info, t);
1080 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1081 
1082 	return ret;
1083 }
1084 
1085 /*
1086  * Nuke all other threads in the group.
1087  */
1088 int zap_other_threads(struct task_struct *p)
1089 {
1090 	struct task_struct *t = p;
1091 	int count = 0;
1092 
1093 	p->signal->group_stop_count = 0;
1094 
1095 	while_each_thread(p, t) {
1096 		count++;
1097 
1098 		/* Don't bother with already dead threads */
1099 		if (t->exit_state)
1100 			continue;
1101 		sigaddset(&t->pending.signal, SIGKILL);
1102 		signal_wake_up(t, 1);
1103 	}
1104 
1105 	return count;
1106 }
1107 
1108 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1109 {
1110 	struct sighand_struct *sighand;
1111 
1112 	rcu_read_lock();
1113 	for (;;) {
1114 		sighand = rcu_dereference(tsk->sighand);
1115 		if (unlikely(sighand == NULL))
1116 			break;
1117 
1118 		spin_lock_irqsave(&sighand->siglock, *flags);
1119 		if (likely(sighand == tsk->sighand))
1120 			break;
1121 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1122 	}
1123 	rcu_read_unlock();
1124 
1125 	return sighand;
1126 }
1127 
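/*
 * Illustrative sketch only (example_with_sighand is hypothetical): the
 * standard pattern for lock_task_sighand(), which fails if the task
 * has already released its sighand; compare do_send_sig_info() above.
 */
static int example_with_sighand(struct task_struct *tsk)
{
	unsigned long flags;

	if (!lock_task_sighand(tsk, &flags))
		return -ESRCH;	/* task is exiting or already dead */

	/* ... inspect or modify tsk's signal state under ->siglock ... */

	unlock_task_sighand(tsk, &flags);
	return 0;
}
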
1128 /*
1129  * send signal info to all the members of a group
1130  */
1131 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1132 {
1133 	int ret;
1134 
1135 	rcu_read_lock();
1136 	ret = check_kill_permission(sig, info, p);
1137 	rcu_read_unlock();
1138 
1139 	if (!ret && sig)
1140 		ret = do_send_sig_info(sig, info, p, true);
1141 
1142 	return ret;
1143 }
1144 
1145 /*
1146  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1147  * control characters do (^C, ^Z etc)
1148  * - the caller must hold at least a readlock on tasklist_lock
1149  */
1150 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1151 {
1152 	struct task_struct *p = NULL;
1153 	int retval, success;
1154 
1155 	success = 0;
1156 	retval = -ESRCH;
1157 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1158 		int err = group_send_sig_info(sig, info, p);
1159 		success |= !err;
1160 		retval = err;
1161 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1162 	return success ? 0 : retval;
1163 }
1164 
1165 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1166 {
1167 	int error = -ESRCH;
1168 	struct task_struct *p;
1169 
1170 	rcu_read_lock();
1171 retry:
1172 	p = pid_task(pid, PIDTYPE_PID);
1173 	if (p) {
1174 		error = group_send_sig_info(sig, info, p);
1175 		if (unlikely(error == -ESRCH))
1176 			/*
1177 			 * The task was unhashed in between, try again.
1178 			 * If it is dead, pid_task() will return NULL,
1179 			 * if we race with de_thread() it will find the
1180 			 * new leader.
1181 			 */
1182 			goto retry;
1183 	}
1184 	rcu_read_unlock();
1185 
1186 	return error;
1187 }
1188 
1189 int
1190 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1191 {
1192 	int error;
1193 	rcu_read_lock();
1194 	error = kill_pid_info(sig, info, find_vpid(pid));
1195 	rcu_read_unlock();
1196 	return error;
1197 }
1198 
1199 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1200 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1201 		      uid_t uid, uid_t euid, u32 secid)
1202 {
1203 	int ret = -EINVAL;
1204 	struct task_struct *p;
1205 	const struct cred *pcred;
1206 	unsigned long flags;
1207 
1208 	if (!valid_signal(sig))
1209 		return ret;
1210 
1211 	rcu_read_lock();
1212 	p = pid_task(pid, PIDTYPE_PID);
1213 	if (!p) {
1214 		ret = -ESRCH;
1215 		goto out_unlock;
1216 	}
1217 	pcred = __task_cred(p);
1218 	if (si_fromuser(info) &&
1219 	    euid != pcred->suid && euid != pcred->uid &&
1220 	    uid  != pcred->suid && uid  != pcred->uid) {
1221 		ret = -EPERM;
1222 		goto out_unlock;
1223 	}
1224 	ret = security_task_kill(p, info, sig, secid);
1225 	if (ret)
1226 		goto out_unlock;
1227 
1228 	if (sig) {
1229 		if (lock_task_sighand(p, &flags)) {
1230 			ret = __send_signal(sig, info, p, 1, 0);
1231 			unlock_task_sighand(p, &flags);
1232 		} else
1233 			ret = -ESRCH;
1234 	}
1235 out_unlock:
1236 	rcu_read_unlock();
1237 	return ret;
1238 }
1239 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1240 
1241 /*
1242  * kill_something_info() interprets pid in interesting ways just like kill(2).
1243  *
1244  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1245  * is probably wrong.  Should make it like BSD or SYSV.
1246  */
1247 
1248 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1249 {
1250 	int ret;
1251 
1252 	if (pid > 0) {
1253 		rcu_read_lock();
1254 		ret = kill_pid_info(sig, info, find_vpid(pid));
1255 		rcu_read_unlock();
1256 		return ret;
1257 	}
1258 
1259 	read_lock(&tasklist_lock);
1260 	if (pid != -1) {
1261 		ret = __kill_pgrp_info(sig, info,
1262 				pid ? find_vpid(-pid) : task_pgrp(current));
1263 	} else {
1264 		int retval = 0, count = 0;
1265 		struct task_struct * p;
1266 
1267 		for_each_process(p) {
1268 			if (task_pid_vnr(p) > 1 &&
1269 					!same_thread_group(p, current)) {
1270 				int err = group_send_sig_info(sig, info, p);
1271 				++count;
1272 				if (err != -EPERM)
1273 					retval = err;
1274 			}
1275 		}
1276 		ret = count ? retval : -ESRCH;
1277 	}
1278 	read_unlock(&tasklist_lock);
1279 
1280 	return ret;
1281 }
1282 
1283 /*
1284  * These are for backward compatibility with the rest of the kernel source.
1285  */
1286 
1287 int
1288 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1289 {
1290 	/*
1291 	 * Make sure legacy kernel users don't send in bad values
1292 	 * (normal paths check this in check_kill_permission).
1293 	 */
1294 	if (!valid_signal(sig))
1295 		return -EINVAL;
1296 
1297 	return do_send_sig_info(sig, info, p, false);
1298 }
1299 
1300 #define __si_special(priv) \
1301 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1302 
1303 int
1304 send_sig(int sig, struct task_struct *p, int priv)
1305 {
1306 	return send_sig_info(sig, __si_special(priv), p);
1307 }
1308 
1309 void
1310 force_sig(int sig, struct task_struct *p)
1311 {
1312 	force_sig_info(sig, SEND_SIG_PRIV, p);
1313 }
1314 
1315 /*
1316  * When things go south during signal handling, we
1317  * will force a SIGSEGV. And if the signal that caused
1318  * the problem was already a SIGSEGV, we'll want to
1319  * make sure we don't even try to deliver the signal.
1320  */
1321 int
1322 force_sigsegv(int sig, struct task_struct *p)
1323 {
1324 	if (sig == SIGSEGV) {
1325 		unsigned long flags;
1326 		spin_lock_irqsave(&p->sighand->siglock, flags);
1327 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1328 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1329 	}
1330 	force_sig(SIGSEGV, p);
1331 	return 0;
1332 }
1333 
1334 int kill_pgrp(struct pid *pid, int sig, int priv)
1335 {
1336 	int ret;
1337 
1338 	read_lock(&tasklist_lock);
1339 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1340 	read_unlock(&tasklist_lock);
1341 
1342 	return ret;
1343 }
1344 EXPORT_SYMBOL(kill_pgrp);
1345 
1346 int kill_pid(struct pid *pid, int sig, int priv)
1347 {
1348 	return kill_pid_info(sig, __si_special(priv), pid);
1349 }
1350 EXPORT_SYMBOL(kill_pid);
1351 
1352 /*
1353  * These functions support sending signals using preallocated sigqueue
1354  * structures.  This is needed "because realtime applications cannot
1355  * afford to lose notifications of asynchronous events, like timer
1356  * expirations or I/O completions".  In the case of POSIX timers
1357  * we allocate the sigqueue structure at timer_create() time.  If this
1358  * allocation fails we are able to report the failure to the application
1359  * with an EAGAIN error.
1360  */
1361 struct sigqueue *sigqueue_alloc(void)
1362 {
1363 	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1364 
1365 	if (q)
1366 		q->flags |= SIGQUEUE_PREALLOC;
1367 
1368 	return q;
1369 }
1370 
1371 void sigqueue_free(struct sigqueue *q)
1372 {
1373 	unsigned long flags;
1374 	spinlock_t *lock = &current->sighand->siglock;
1375 
1376 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1377 	/*
1378 	 * We must hold ->siglock while testing q->list
1379 	 * to serialize with collect_signal() or with
1380 	 * __exit_signal()->flush_sigqueue().
1381 	 */
1382 	spin_lock_irqsave(lock, flags);
1383 	q->flags &= ~SIGQUEUE_PREALLOC;
1384 	/*
1385 	 * If it is queued it will be freed when dequeued,
1386 	 * like the "regular" sigqueue.
1387 	 */
1388 	if (!list_empty(&q->list))
1389 		q = NULL;
1390 	spin_unlock_irqrestore(lock, flags);
1391 
1392 	if (q)
1393 		__sigqueue_free(q);
1394 }
1395 
1396 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1397 {
1398 	int sig = q->info.si_signo;
1399 	struct sigpending *pending;
1400 	unsigned long flags;
1401 	int ret;
1402 
1403 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1404 
1405 	ret = -1;
1406 	if (!likely(lock_task_sighand(t, &flags)))
1407 		goto ret;
1408 
1409 	ret = 1; /* the signal is ignored */
1410 	if (!prepare_signal(sig, t, 0))
1411 		goto out;
1412 
1413 	ret = 0;
1414 	if (unlikely(!list_empty(&q->list))) {
1415 		/*
1416 		 * If an SI_TIMER entry is already queued, just increment
1417 		 * the overrun count.
1418 		 */
1419 		BUG_ON(q->info.si_code != SI_TIMER);
1420 		q->info.si_overrun++;
1421 		goto out;
1422 	}
1423 	q->info.si_overrun = 0;
1424 
1425 	signalfd_notify(t, sig);
1426 	pending = group ? &t->signal->shared_pending : &t->pending;
1427 	list_add_tail(&q->list, &pending->list);
1428 	sigaddset(&pending->signal, sig);
1429 	complete_signal(sig, t, group);
1430 out:
1431 	unlock_task_sighand(t, &flags);
1432 ret:
1433 	return ret;
1434 }
1435 
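/*
 * Illustrative sketch only (the example_* helpers are hypothetical):
 * the preallocated-sigqueue life cycle used by POSIX timers.  The
 * allocation failure is reported at creation time, so expiry can
 * never fail for lack of memory.
 */
static struct sigqueue *example_timer_create(void)
{
	return sigqueue_alloc();	/* NULL => return -EAGAIN to userspace */
}

static int example_timer_expired(struct sigqueue *q, struct task_struct *t)
{
	q->info.si_signo = SIGALRM;
	q->info.si_code = SI_TIMER;
	/* If q is still queued, send_sigqueue() only bumps si_overrun. */
	return send_sigqueue(q, t, 0);
}

static void example_timer_delete(struct sigqueue *q)
{
	sigqueue_free(q);
}
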
1436 /*
1437  * Let a parent know about the death of a child.
1438  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1439  *
1440  * Returns -1 if our parent ignored us and so we've switched to
1441  * self-reaping, or else @sig.
1442  */
1443 int do_notify_parent(struct task_struct *tsk, int sig)
1444 {
1445 	struct siginfo info;
1446 	unsigned long flags;
1447 	struct sighand_struct *psig;
1448 	int ret = sig;
1449 
1450 	BUG_ON(sig == -1);
1451 
1452 	/* do_notify_parent_cldstop should have been called instead.  */
1453 	BUG_ON(task_is_stopped_or_traced(tsk));
1454 
1455 	BUG_ON(!task_ptrace(tsk) &&
1456 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1457 
1458 	info.si_signo = sig;
1459 	info.si_errno = 0;
1460 	 * We are under tasklist_lock here so our parent is tied to
1461 	 * us and cannot exit and release its namespace.
1462 	 *
1463 	 * The only thing it can do is switch its nsproxy via sys_unshare(),
1464 	 * but unsharing pid namespaces is not allowed, so we will always
1465 	 * see the relevant namespace.
1466 	 *
1467 	 * write_lock() currently calls preempt_disable(), which is the
1468 	 * same as rcu_read_lock(); but according to Oleg it is not
1469 	 * correct to rely on this.
1470 	 * correct to rely on this
1471 	 */
1472 	rcu_read_lock();
1473 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1474 	info.si_uid = __task_cred(tsk)->uid;
1475 	rcu_read_unlock();
1476 
1477 	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1478 				tsk->signal->utime));
1479 	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1480 				tsk->signal->stime));
1481 
1482 	info.si_status = tsk->exit_code & 0x7f;
1483 	if (tsk->exit_code & 0x80)
1484 		info.si_code = CLD_DUMPED;
1485 	else if (tsk->exit_code & 0x7f)
1486 		info.si_code = CLD_KILLED;
1487 	else {
1488 		info.si_code = CLD_EXITED;
1489 		info.si_status = tsk->exit_code >> 8;
1490 	}
1491 
1492 	psig = tsk->parent->sighand;
1493 	spin_lock_irqsave(&psig->siglock, flags);
1494 	if (!task_ptrace(tsk) && sig == SIGCHLD &&
1495 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1496 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1497 		/*
1498 		 * We are exiting and our parent doesn't care.  POSIX.1
1499 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1500 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1501 		 * automatically and not left for our parent's wait4 call.
1502 		 * Rather than having the parent do it as a magic kind of
1503 		 * signal handler, we just set this to tell do_exit that we
1504 		 * can be cleaned up without becoming a zombie.  Note that
1505 		 * we still call __wake_up_parent in this case, because a
1506 		 * blocked sys_wait4 might now return -ECHILD.
1507 		 *
1508 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1509 		 * is implementation-defined: we do (if you don't want
1510 		 * it, just use SIG_IGN instead).
1511 		 */
1512 		ret = tsk->exit_signal = -1;
1513 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1514 			sig = -1;
1515 	}
1516 	if (valid_signal(sig) && sig > 0)
1517 		__group_send_sig_info(sig, &info, tsk->parent);
1518 	__wake_up_parent(tsk, tsk->parent);
1519 	spin_unlock_irqrestore(&psig->siglock, flags);
1520 
1521 	return ret;
1522 }
1523 
1524 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1525 {
1526 	struct siginfo info;
1527 	unsigned long flags;
1528 	struct task_struct *parent;
1529 	struct sighand_struct *sighand;
1530 
1531 	if (task_ptrace(tsk))
1532 		parent = tsk->parent;
1533 	else {
1534 		tsk = tsk->group_leader;
1535 		parent = tsk->real_parent;
1536 	}
1537 
1538 	info.si_signo = SIGCHLD;
1539 	info.si_errno = 0;
1540 	/*
1541 	 * see comment in do_notify_parent() about the following 3 lines
1542 	 */
1543 	rcu_read_lock();
1544 	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1545 	info.si_uid = __task_cred(tsk)->uid;
1546 	rcu_read_unlock();
1547 
1548 	info.si_utime = cputime_to_clock_t(tsk->utime);
1549 	info.si_stime = cputime_to_clock_t(tsk->stime);
1550 
1551 	info.si_code = why;
1552 	switch (why) {
1553 	case CLD_CONTINUED:
1554 		info.si_status = SIGCONT;
1555 		break;
1556 	case CLD_STOPPED:
1557 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1558 		break;
1559 	case CLD_TRAPPED:
1560 		info.si_status = tsk->exit_code & 0x7f;
1561 		break;
1562 	default:
1563 		BUG();
1564 	}
1565 
1566 	sighand = parent->sighand;
1567 	spin_lock_irqsave(&sighand->siglock, flags);
1568 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1569 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1570 		__group_send_sig_info(SIGCHLD, &info, parent);
1571 	/*
1572 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1573 	 */
1574 	__wake_up_parent(tsk, parent);
1575 	spin_unlock_irqrestore(&sighand->siglock, flags);
1576 }
1577 
1578 static inline int may_ptrace_stop(void)
1579 {
1580 	if (!likely(task_ptrace(current)))
1581 		return 0;
1582 	/*
1583 	 * Are we in the middle of do_coredump?
1584 	 * If so, and our tracer is also part of the coredump, stopping
1585 	 * would deadlock and is pointless anyway because our tracer
1586 	 * is dead, so don't allow us to stop.
1587 	 * If SIGKILL was already sent before the caller unlocked
1588 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1589 	 * is safe to enter schedule().
1590 	 */
1591 	if (unlikely(current->mm->core_state) &&
1592 	    unlikely(current->mm == current->parent->mm))
1593 		return 0;
1594 
1595 	return 1;
1596 }
1597 
1598 /*
1599  * Return nonzero if there is a SIGKILL that should be waking us up.
1600  * Called with the siglock held.
1601  */
1602 static int sigkill_pending(struct task_struct *tsk)
1603 {
1604 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1605 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1606 }
1607 
1608 /*
1609  * This must be called with current->sighand->siglock held.
1610  *
1611  * This should be the path for all ptrace stops.
1612  * We always set current->last_siginfo while stopped here.
1613  * That makes it a way to test a stopped process for
1614  * being ptrace-stopped vs being job-control-stopped.
1615  *
1616  * If we actually decide not to stop at all because the tracer
1617  * is gone, we keep current->exit_code unless clear_code.
1618  */
1619 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1620 {
1621 	if (arch_ptrace_stop_needed(exit_code, info)) {
1622 		/*
1623 		 * The arch code has something special to do before a
1624 		 * ptrace stop.  This is allowed to block, e.g. for faults
1625 		 * on user stack pages.  We can't keep the siglock while
1626 		 * calling arch_ptrace_stop, so we must release it now.
1627 		 * To preserve proper semantics, we must do this before
1628 		 * any signal bookkeeping like checking group_stop_count.
1629 		 * Meanwhile, a SIGKILL could come in before we retake the
1630 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1631 		 * So after regaining the lock, we must check for SIGKILL.
1632 		 */
1633 		spin_unlock_irq(&current->sighand->siglock);
1634 		arch_ptrace_stop(exit_code, info);
1635 		spin_lock_irq(&current->sighand->siglock);
1636 		if (sigkill_pending(current))
1637 			return;
1638 	}
1639 
1640 	/*
1641 	 * If there is a group stop in progress,
1642 	 * we must participate in the bookkeeping.
1643 	 */
1644 	if (current->signal->group_stop_count > 0)
1645 		--current->signal->group_stop_count;
1646 
1647 	current->last_siginfo = info;
1648 	current->exit_code = exit_code;
1649 
1650 	/* Let the debugger run.  */
1651 	__set_current_state(TASK_TRACED);
1652 	spin_unlock_irq(&current->sighand->siglock);
1653 	read_lock(&tasklist_lock);
1654 	if (may_ptrace_stop()) {
1655 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1656 		/*
1657 		 * Don't want to allow preemption here, because
1658 		 * sys_ptrace() needs this task to be inactive.
1659 		 *
1660 		 * XXX: implement read_unlock_no_resched().
1661 		 */
1662 		preempt_disable();
1663 		read_unlock(&tasklist_lock);
1664 		preempt_enable_no_resched();
1665 		schedule();
1666 	} else {
1667 		/*
1668 		 * By the time we got the lock, our tracer went away.
1669 		 * Don't drop the lock yet, another tracer may come.
1670 		 */
1671 		__set_current_state(TASK_RUNNING);
1672 		if (clear_code)
1673 			current->exit_code = 0;
1674 		read_unlock(&tasklist_lock);
1675 	}
1676 
1677 	/*
1678 	 * While in TASK_TRACED, we were considered "frozen enough".
1679 	 * Now that we woke up, it's crucial if we're supposed to be
1680 	 * frozen that we freeze now before running anything substantial.
1681 	 */
1682 	try_to_freeze();
1683 
1684 	/*
1685 	 * We are back.  Now reacquire the siglock before touching
1686 	 * last_siginfo, so that we are sure to have synchronized with
1687 	 * any signal-sending on another CPU that wants to examine it.
1688 	 */
1689 	spin_lock_irq(&current->sighand->siglock);
1690 	current->last_siginfo = NULL;
1691 
1692 	/*
1693 	 * Queued signals ignored us while we were stopped for tracing.
1694 	 * So check for any that we should take before resuming user mode.
1695 	 * This sets TIF_SIGPENDING, but never clears it.
1696 	 */
1697 	recalc_sigpending_tsk(current);
1698 }
1699 
1700 void ptrace_notify(int exit_code)
1701 {
1702 	siginfo_t info;
1703 
1704 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1705 
1706 	memset(&info, 0, sizeof info);
1707 	info.si_signo = SIGTRAP;
1708 	info.si_code = exit_code;
1709 	info.si_pid = task_pid_vnr(current);
1710 	info.si_uid = current_uid();
1711 
1712 	/* Let the debugger run.  */
1713 	spin_lock_irq(&current->sighand->siglock);
1714 	ptrace_stop(exit_code, 1, &info);
1715 	spin_unlock_irq(&current->sighand->siglock);
1716 }
1717 
1718 /*
1719  * This performs the stopping for SIGSTOP and other stop signals.
1720  * We have to stop all threads in the thread group.
1721  * Returns nonzero if we've actually stopped and released the siglock.
1722  * Returns zero if we didn't stop and still hold the siglock.
1723  */
1724 static int do_signal_stop(int signr)
1725 {
1726 	struct signal_struct *sig = current->signal;
1727 	int notify;
1728 
1729 	if (!sig->group_stop_count) {
1730 		struct task_struct *t;
1731 
1732 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1733 		    unlikely(signal_group_exit(sig)))
1734 			return 0;
1735 		/*
1736 		 * There is no group stop already in progress.
1737 		 * We must initiate one now.
1738 		 */
1739 		sig->group_exit_code = signr;
1740 
1741 		sig->group_stop_count = 1;
1742 		for (t = next_thread(current); t != current; t = next_thread(t))
1743 			/*
1744 			 * Setting state to TASK_STOPPED for a group
1745 			 * stop is always done with the siglock held,
1746 			 * so this check has no races.
1747 			 */
1748 			if (!(t->flags & PF_EXITING) &&
1749 			    !task_is_stopped_or_traced(t)) {
1750 				sig->group_stop_count++;
1751 				signal_wake_up(t, 0);
1752 			}
1753 	}
1754 	/*
1755 	 * If there are no other threads in the group, or if there is
1756 	 * a group stop in progress and we are the last to stop, report
1757 	 * to the parent.  When ptraced, every thread reports itself.
1758 	 */
1759 	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
1760 	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
1761 	/*
1762 	 * tracehook_notify_jctl() can drop and reacquire siglock, so
1763 	 * we keep ->group_stop_count != 0 across the call. If SIGCONT
1764 	 * or SIGKILL comes in meanwhile, ->group_stop_count becomes 0.
1765 	 */
1766 	if (sig->group_stop_count) {
1767 		if (!--sig->group_stop_count)
1768 			sig->flags = SIGNAL_STOP_STOPPED;
1769 		current->exit_code = sig->group_exit_code;
1770 		__set_current_state(TASK_STOPPED);
1771 	}
1772 	spin_unlock_irq(&current->sighand->siglock);
1773 
1774 	if (notify) {
1775 		read_lock(&tasklist_lock);
1776 		do_notify_parent_cldstop(current, notify);
1777 		read_unlock(&tasklist_lock);
1778 	}
1779 
1780 	/* Now we don't run again until woken by SIGCONT or SIGKILL */
1781 	do {
1782 		schedule();
1783 	} while (try_to_freeze());
1784 
1785 	tracehook_finish_jctl();
1786 	current->exit_code = 0;
1787 
1788 	return 1;
1789 }
1790 
1791 static int ptrace_signal(int signr, siginfo_t *info,
1792 			 struct pt_regs *regs, void *cookie)
1793 {
1794 	if (!task_ptrace(current))
1795 		return signr;
1796 
1797 	ptrace_signal_deliver(regs, cookie);
1798 
1799 	/* Let the debugger run.  */
1800 	ptrace_stop(signr, 0, info);
1801 
1802 	/* We're back.  Did the debugger cancel the sig?  */
1803 	signr = current->exit_code;
1804 	if (signr == 0)
1805 		return signr;
1806 
1807 	current->exit_code = 0;
1808 
1809 	/* Update the siginfo structure if the signal has
1810 	   changed.  If the debugger wanted something
1811 	   specific in the siginfo structure then it should
1812 	   have updated *info via PTRACE_SETSIGINFO.  */
1813 	if (signr != info->si_signo) {
1814 		info->si_signo = signr;
1815 		info->si_errno = 0;
1816 		info->si_code = SI_USER;
1817 		info->si_pid = task_pid_vnr(current->parent);
1818 		info->si_uid = task_uid(current->parent);
1819 	}
1820 
1821 	/* If the (new) signal is now blocked, requeue it.  */
1822 	if (sigismember(&current->blocked, signr)) {
1823 		specific_send_sig_info(signr, info, current);
1824 		signr = 0;
1825 	}
1826 
1827 	return signr;
1828 }
1829 
1830 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1831 			  struct pt_regs *regs, void *cookie)
1832 {
1833 	struct sighand_struct *sighand = current->sighand;
1834 	struct signal_struct *signal = current->signal;
1835 	int signr;
1836 
1837 relock:
1838 	/*
1839 	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1840 	 * While in TASK_STOPPED, we were considered "frozen enough".
1841 	 * Now that we woke up, it's crucial if we're supposed to be
1842 	 * frozen that we freeze now before running anything substantial.
1843 	 */
1844 	try_to_freeze();
1845 
1846 	spin_lock_irq(&sighand->siglock);
1847 	/*
1848 	 * Every stopped thread goes here after wakeup. Check to see if
1849 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
1850 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1851 	 */
1852 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1853 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1854 				? CLD_CONTINUED : CLD_STOPPED;
1855 		signal->flags &= ~SIGNAL_CLD_MASK;
1856 
1857 		why = tracehook_notify_jctl(why, CLD_CONTINUED);
1858 		spin_unlock_irq(&sighand->siglock);
1859 
1860 		if (why) {
1861 			read_lock(&tasklist_lock);
1862 			do_notify_parent_cldstop(current->group_leader, why);
1863 			read_unlock(&tasklist_lock);
1864 		}
1865 		goto relock;
1866 	}
1867 
1868 	for (;;) {
1869 		struct k_sigaction *ka;
1870 		/*
1871 		 * Tracing can induce an artificial signal and choose sigaction.
1872 		 * The return value in @signr determines the default action,
1873 		 * but @info->si_signo is the signal number we will report.
1874 		 */
1875 		signr = tracehook_get_signal(current, regs, info, return_ka);
1876 		if (unlikely(signr < 0))
1877 			goto relock;
1878 		if (unlikely(signr != 0))
1879 			ka = return_ka;
1880 		else {
1881 			if (unlikely(signal->group_stop_count > 0) &&
1882 			    do_signal_stop(0))
1883 				goto relock;
1884 
1885 			signr = dequeue_signal(current, &current->blocked,
1886 					       info);
1887 
1888 			if (!signr)
1889 				break; /* will return 0 */
1890 
1891 			if (signr != SIGKILL) {
1892 				signr = ptrace_signal(signr, info,
1893 						      regs, cookie);
1894 				if (!signr)
1895 					continue;
1896 			}
1897 
1898 			ka = &sighand->action[signr-1];
1899 		}
1900 
1901 		/* Trace actually delivered signals. */
1902 		trace_signal_deliver(signr, info, ka);
1903 
1904 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1905 			continue;
1906 		if (ka->sa.sa_handler != SIG_DFL) {
1907 			/* Run the handler.  */
1908 			*return_ka = *ka;
1909 
1910 			if (ka->sa.sa_flags & SA_ONESHOT)
1911 				ka->sa.sa_handler = SIG_DFL;
1912 
1913 			break; /* will return non-zero "signr" value */
1914 		}
1915 
1916 		/*
1917 		 * Now we are doing the default action for this signal.
1918 		 */
1919 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1920 			continue;
1921 
1922 		/*
1923 		 * Global init gets no signals it doesn't want.
1924 		 * Container-init gets no signals it doesn't want from same
1925 		 * container.
1926 		 *
1927 		 * Note that if global/container-init sees a sig_kernel_only()
1928 		 * signal here, the signal must have been generated internally
1929 		 * or must have come from an ancestor namespace. In either
1930 		 * case, the signal cannot be dropped.
1931 		 */
1932 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1933 				!sig_kernel_only(signr))
1934 			continue;
1935 
1936 		if (sig_kernel_stop(signr)) {
1937 			/*
1938 			 * The default action is to stop all threads in
1939 			 * the thread group.  The job control signals
1940 			 * do nothing in an orphaned pgrp, but SIGSTOP
1941 			 * always works.  Note that siglock needs to be
1942 			 * dropped during the call to is_orphaned_pgrp()
1943 			 * because of lock ordering with tasklist_lock.
1944 			 * This allows an intervening SIGCONT to be posted.
1945 			 * We need to check for that and bail out if necessary.
1946 			 */
1947 			if (signr != SIGSTOP) {
1948 				spin_unlock_irq(&sighand->siglock);
1949 
1950 				/* signals can be posted during this window */
1951 
1952 				if (is_current_pgrp_orphaned())
1953 					goto relock;
1954 
1955 				spin_lock_irq(&sighand->siglock);
1956 			}
1957 
1958 			if (likely(do_signal_stop(info->si_signo))) {
1959 				/* It released the siglock.  */
1960 				goto relock;
1961 			}
1962 
1963 			/*
1964 			 * We didn't actually stop, due to a race
1965 			 * with SIGCONT or something like that.
1966 			 */
1967 			continue;
1968 		}
1969 
1970 		spin_unlock_irq(&sighand->siglock);
1971 
1972 		/*
1973 		 * Anything else is fatal, maybe with a core dump.
1974 		 */
1975 		current->flags |= PF_SIGNALED;
1976 
1977 		if (sig_kernel_coredump(signr)) {
1978 			if (print_fatal_signals)
1979 				print_fatal_signal(regs, info->si_signo);
1980 			/*
1981 			 * If it was able to dump core, this kills all
1982 			 * other threads in the group and synchronizes with
1983 			 * their demise.  If we lost the race with another
1984 			 * thread getting here, it set group_exit_code
1985 			 * first and our do_group_exit call below will use
1986 			 * that value and ignore the one we pass it.
1987 			 */
1988 			do_coredump(info->si_signo, info->si_signo, regs);
1989 		}
1990 
1991 		/*
1992 		 * Death signals, no core dump.
1993 		 */
1994 		do_group_exit(info->si_signo);
1995 		/* NOTREACHED */
1996 	}
1997 	spin_unlock_irq(&sighand->siglock);
1998 	return signr;
1999 }
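
/*
 * Sketch of the typical arch-side caller (simplified and hypothetical;
 * real arch code also handles syscall restart, and handle_signal()
 * here stands in for the arch's own sigframe-setup helper):
 */
#if 0
static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Deliver: build a sigframe and run the user handler. */
		handle_signal(signr, &ka, &info, regs);
		return;
	}
	/* signr == 0: nothing to deliver; restore saved sigmask etc. */
}
#endif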
2000 
2001 void exit_signals(struct task_struct *tsk)
2002 {
2003 	int group_stop = 0;
2004 	struct task_struct *t;
2005 
2006 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2007 		tsk->flags |= PF_EXITING;
2008 		return;
2009 	}
2010 
2011 	spin_lock_irq(&tsk->sighand->siglock);
2012 	/*
2013 	 * From now this task is not visible for group-wide signals,
2014 	 * see wants_signal(), do_signal_stop().
2015 	 */
2016 	tsk->flags |= PF_EXITING;
2017 	if (!signal_pending(tsk))
2018 		goto out;
2019 
2020 	/* It could be that __group_complete_signal() chose us to
2021 	 * notify about a group-wide signal. Another thread should be
2022 	 * woken now to take the signal, since we will not.
2023 	 */
2024 	for (t = tsk; (t = next_thread(t)) != tsk; )
2025 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
2026 			recalc_sigpending_and_wake(t);
2027 
2028 	if (unlikely(tsk->signal->group_stop_count) &&
2029 			!--tsk->signal->group_stop_count) {
2030 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
2031 		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
2032 	}
2033 out:
2034 	spin_unlock_irq(&tsk->sighand->siglock);
2035 
2036 	if (unlikely(group_stop)) {
2037 		read_lock(&tasklist_lock);
2038 		do_notify_parent_cldstop(tsk, group_stop);
2039 		read_unlock(&tasklist_lock);
2040 	}
2041 }
2042 
2043 EXPORT_SYMBOL(recalc_sigpending);
2044 EXPORT_SYMBOL_GPL(dequeue_signal);
2045 EXPORT_SYMBOL(flush_signals);
2046 EXPORT_SYMBOL(force_sig);
2047 EXPORT_SYMBOL(send_sig);
2048 EXPORT_SYMBOL(send_sig_info);
2049 EXPORT_SYMBOL(sigprocmask);
2050 EXPORT_SYMBOL(block_all_signals);
2051 EXPORT_SYMBOL(unblock_all_signals);
2052 
2053 
2054 /*
2055  * System call entry points.
2056  */
2057 
2058 SYSCALL_DEFINE0(restart_syscall)
2059 {
2060 	struct restart_block *restart = &current_thread_info()->restart_block;
2061 	return restart->fn(restart);
2062 }
2063 
2064 long do_no_restart_syscall(struct restart_block *param)
2065 {
2066 	return -EINTR;
2067 }
2068 
2069 /*
2070  * We don't need to get the kernel lock - this is all local to this
2071  * particular thread (and that's good, because this is _heavily_
2072  * used by various programs).
2073  */
2074 
2075 /*
2076  * This is also useful for kernel threads that want to temporarily
2077  * (or permanently) block certain signals.
2078  *
2079  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2080  * interface happily blocks "unblockable" signals like SIGKILL
2081  * and friends.
2082  */
2083 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2084 {
2085 	int error;
2086 
2087 	spin_lock_irq(&current->sighand->siglock);
2088 	if (oldset)
2089 		*oldset = current->blocked;
2090 
2091 	error = 0;
2092 	switch (how) {
2093 	case SIG_BLOCK:
2094 		sigorsets(&current->blocked, &current->blocked, set);
2095 		break;
2096 	case SIG_UNBLOCK:
2097 		signandsets(&current->blocked, &current->blocked, set);
2098 		break;
2099 	case SIG_SETMASK:
2100 		current->blocked = *set;
2101 		break;
2102 	default:
2103 		error = -EINVAL;
2104 	}
2105 	recalc_sigpending();
2106 	spin_unlock_irq(&current->sighand->siglock);
2107 
2108 	return error;
2109 }
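
/*
 * Minimal in-kernel usage sketch (hypothetical caller): temporarily
 * blocking a signal around a critical region with the sigprocmask()
 * above, then restoring the old mask.
 */
#if 0
static void block_sigusr1_example(void)
{
	sigset_t set, old;

	siginitset(&set, sigmask(SIGUSR1));
	sigprocmask(SIG_BLOCK, &set, &old);	/* add SIGUSR1 to ->blocked */
	/* ... region where SIGUSR1 stays pending ... */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore previous mask */
}
#endif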
2110 
2111 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2112 		sigset_t __user *, oset, size_t, sigsetsize)
2113 {
2114 	int error = -EINVAL;
2115 	sigset_t old_set, new_set;
2116 
2117 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2118 	if (sigsetsize != sizeof(sigset_t))
2119 		goto out;
2120 
2121 	if (set) {
2122 		error = -EFAULT;
2123 		if (copy_from_user(&new_set, set, sizeof(*set)))
2124 			goto out;
2125 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2126 
2127 		error = sigprocmask(how, &new_set, &old_set);
2128 		if (error)
2129 			goto out;
2130 		if (oset)
2131 			goto set_old;
2132 	} else if (oset) {
2133 		spin_lock_irq(&current->sighand->siglock);
2134 		old_set = current->blocked;
2135 		spin_unlock_irq(&current->sighand->siglock);
2136 
2137 	set_old:
2138 		error = -EFAULT;
2139 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2140 			goto out;
2141 	}
2142 	error = 0;
2143 out:
2144 	return error;
2145 }
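
/*
 * Illustrative userspace counterpart (assumes the glibc sigprocmask()
 * wrapper, which invokes sys_rt_sigprocmask() with a full sigset_t):
 */
#if 0
#include <signal.h>

static void block_sigint(sigset_t *saved)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigprocmask(SIG_BLOCK, &set, saved);	/* old mask stored in *saved */
}
#endif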
2146 
2147 long do_sigpending(void __user *set, unsigned long sigsetsize)
2148 {
2149 	long error = -EINVAL;
2150 	sigset_t pending;
2151 
2152 	if (sigsetsize > sizeof(sigset_t))
2153 		goto out;
2154 
2155 	spin_lock_irq(&current->sighand->siglock);
2156 	sigorsets(&pending, &current->pending.signal,
2157 		  &current->signal->shared_pending.signal);
2158 	spin_unlock_irq(&current->sighand->siglock);
2159 
2160 	/* Outside the lock because only this thread touches it.  */
2161 	sigandsets(&pending, &current->blocked, &pending);
2162 
2163 	error = -EFAULT;
2164 	if (!copy_to_user(set, &pending, sigsetsize))
2165 		error = 0;
2166 
2167 out:
2168 	return error;
2169 }
2170 
2171 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2172 {
2173 	return do_sigpending(set, sigsetsize);
2174 }
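
/*
 * Illustrative userspace use (assumes glibc): sigpending() reports
 * signals that are both pending and blocked, matching the final
 * sigandsets() against ->blocked in do_sigpending() above.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static void report_pending(void)
{
	sigset_t pending;

	if (sigpending(&pending) == 0 && sigismember(&pending, SIGINT))
		printf("SIGINT is pending (and blocked)\n");
}
#endif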
2175 
2176 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2177 
2178 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2179 {
2180 	int err;
2181 
2182 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2183 		return -EFAULT;
2184 	if (from->si_code < 0)
2185 		return __copy_to_user(to, from, sizeof(siginfo_t))
2186 			? -EFAULT : 0;
2187 	/*
2188 	 * If you change siginfo_t structure, please be sure
2189 	 * this code is fixed accordingly.
2190 	 * Please remember to update the signalfd_copyinfo() function
2191 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2192 	 * It should never copy any pad contained in the structure
2193 	 * to avoid security leaks, but must copy the generic
2194 	 * 3 ints plus the relevant union member.
2195 	 */
2196 	err = __put_user(from->si_signo, &to->si_signo);
2197 	err |= __put_user(from->si_errno, &to->si_errno);
2198 	err |= __put_user((short)from->si_code, &to->si_code);
2199 	switch (from->si_code & __SI_MASK) {
2200 	case __SI_KILL:
2201 		err |= __put_user(from->si_pid, &to->si_pid);
2202 		err |= __put_user(from->si_uid, &to->si_uid);
2203 		break;
2204 	case __SI_TIMER:
2205 		err |= __put_user(from->si_tid, &to->si_tid);
2206 		err |= __put_user(from->si_overrun, &to->si_overrun);
2207 		err |= __put_user(from->si_ptr, &to->si_ptr);
2208 		break;
2209 	case __SI_POLL:
2210 		err |= __put_user(from->si_band, &to->si_band);
2211 		err |= __put_user(from->si_fd, &to->si_fd);
2212 		break;
2213 	case __SI_FAULT:
2214 		err |= __put_user(from->si_addr, &to->si_addr);
2215 #ifdef __ARCH_SI_TRAPNO
2216 		err |= __put_user(from->si_trapno, &to->si_trapno);
2217 #endif
2218 		break;
2219 	case __SI_CHLD:
2220 		err |= __put_user(from->si_pid, &to->si_pid);
2221 		err |= __put_user(from->si_uid, &to->si_uid);
2222 		err |= __put_user(from->si_status, &to->si_status);
2223 		err |= __put_user(from->si_utime, &to->si_utime);
2224 		err |= __put_user(from->si_stime, &to->si_stime);
2225 		break;
2226 	case __SI_RT: /* This is not generated by the kernel as of now. */
2227 	case __SI_MESGQ: /* But this is */
2228 		err |= __put_user(from->si_pid, &to->si_pid);
2229 		err |= __put_user(from->si_uid, &to->si_uid);
2230 		err |= __put_user(from->si_ptr, &to->si_ptr);
2231 		break;
2232 	default: /* this is just in case for now ... */
2233 		err |= __put_user(from->si_pid, &to->si_pid);
2234 		err |= __put_user(from->si_uid, &to->si_uid);
2235 		break;
2236 	}
2237 	return err;
2238 }
2239 
2240 #endif
2241 
2242 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2243 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2244 		size_t, sigsetsize)
2245 {
2246 	int ret, sig;
2247 	sigset_t these;
2248 	struct timespec ts;
2249 	siginfo_t info;
2250 	long timeout = 0;
2251 
2252 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2253 	if (sigsetsize != sizeof(sigset_t))
2254 		return -EINVAL;
2255 
2256 	if (copy_from_user(&these, uthese, sizeof(these)))
2257 		return -EFAULT;
2258 
2259 	/*
2260 	 * Invert the set of allowed signals to get those we
2261 	 * want to block.
2262 	 */
2263 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2264 	signotset(&these);
2265 
2266 	if (uts) {
2267 		if (copy_from_user(&ts, uts, sizeof(ts)))
2268 			return -EFAULT;
2269 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2270 		    || ts.tv_sec < 0)
2271 			return -EINVAL;
2272 	}
2273 
2274 	spin_lock_irq(&current->sighand->siglock);
2275 	sig = dequeue_signal(current, &these, &info);
2276 	if (!sig) {
2277 		timeout = MAX_SCHEDULE_TIMEOUT;
2278 		if (uts)
2279 			timeout = (timespec_to_jiffies(&ts)
2280 				   + (ts.tv_sec || ts.tv_nsec));
2281 
2282 		if (timeout) {
2283 			/* None ready -- temporarily unblock those we're
2284 			 * interested in while we are sleeping, so that we'll
2285 			 * be awakened when they arrive.  */
2286 			current->real_blocked = current->blocked;
2287 			sigandsets(&current->blocked, &current->blocked, &these);
2288 			recalc_sigpending();
2289 			spin_unlock_irq(&current->sighand->siglock);
2290 
2291 			timeout = schedule_timeout_interruptible(timeout);
2292 
2293 			spin_lock_irq(&current->sighand->siglock);
2294 			sig = dequeue_signal(current, &these, &info);
2295 			current->blocked = current->real_blocked;
2296 			siginitset(&current->real_blocked, 0);
2297 			recalc_sigpending();
2298 		}
2299 	}
2300 	spin_unlock_irq(&current->sighand->siglock);
2301 
2302 	if (sig) {
2303 		ret = sig;
2304 		if (uinfo) {
2305 			if (copy_siginfo_to_user(uinfo, &info))
2306 				ret = -EFAULT;
2307 		}
2308 	} else {
2309 		ret = -EAGAIN;
2310 		if (timeout)
2311 			ret = -EINTR;
2312 	}
2313 
2314 	return ret;
2315 }
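
/*
 * Illustrative userspace use (assumes glibc sigtimedwait()): callers
 * pass the set of signals to wait *for*; the syscall above inverts it
 * with signotset() to build the temporary blocked mask.
 */
#if 0
#include <signal.h>
#include <time.h>

static int wait_for_sigusr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 5, 0 };		/* 5 second timeout */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep it pending, not delivered */
	return sigtimedwait(&set, &info, &ts);	/* signo, or -1 with EAGAIN */
}
#endif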
2316 
2317 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2318 {
2319 	struct siginfo info;
2320 
2321 	info.si_signo = sig;
2322 	info.si_errno = 0;
2323 	info.si_code = SI_USER;
2324 	info.si_pid = task_tgid_vnr(current);
2325 	info.si_uid = current_uid();
2326 
2327 	return kill_something_info(sig, &info, pid);
2328 }
2329 
2330 static int
2331 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2332 {
2333 	struct task_struct *p;
2334 	int error = -ESRCH;
2335 
2336 	rcu_read_lock();
2337 	p = find_task_by_vpid(pid);
2338 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2339 		error = check_kill_permission(sig, info, p);
2340 		/*
2341 		 * The null signal is a permissions and process existence
2342 		 * probe.  No signal is actually delivered.
2343 		 */
2344 		if (!error && sig) {
2345 			error = do_send_sig_info(sig, info, p, false);
2346 			/*
2347 			 * If lock_task_sighand() failed we pretend the task
2348 			 * dies after receiving the signal. The window is tiny,
2349 			 * and the signal is private anyway.
2350 			 */
2351 			if (unlikely(error == -ESRCH))
2352 				error = 0;
2353 		}
2354 	}
2355 	rcu_read_unlock();
2356 
2357 	return error;
2358 }
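
/*
 * Illustrative userspace use of the null-signal probe noted above
 * (assumes glibc): kill(pid, 0) delivers nothing, but still runs the
 * existence and permission checks.
 */
#if 0
#include <sys/types.h>
#include <signal.h>
#include <errno.h>

static int process_exists(pid_t pid)
{
	/* EPERM means the process exists but we may not signal it. */
	return kill(pid, 0) == 0 || errno == EPERM;
}
#endif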
2359 
2360 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2361 {
2362 	struct siginfo info;
2363 
2364 	info.si_signo = sig;
2365 	info.si_errno = 0;
2366 	info.si_code = SI_TKILL;
2367 	info.si_pid = task_tgid_vnr(current);
2368 	info.si_uid = current_uid();
2369 
2370 	return do_send_specific(tgid, pid, sig, &info);
2371 }
2372 
2373 /**
2374  *  sys_tgkill - send signal to one specific thread
2375  *  @tgid: the thread group ID of the thread
2376  *  @pid: the PID of the thread
2377  *  @sig: signal to be sent
2378  *
2379  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2380  *  exists but no longer belongs to the target process. This check
2381  *  solves the problem of threads exiting and PIDs getting reused.
2382  */
2383 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2384 {
2385 	/* This is only valid for single tasks */
2386 	if (pid <= 0 || tgid <= 0)
2387 		return -EINVAL;
2388 
2389 	return do_tkill(tgid, pid, sig);
2390 }
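
/*
 * Illustrative userspace use (sketch; older C libraries lack a
 * tgkill() wrapper, so it is commonly invoked via syscall(2)):
 */
#if 0
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int my_tgkill(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif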
2391 
2392 /*
2393  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2394  */
2395 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2396 {
2397 	/* This is only valid for single tasks */
2398 	if (pid <= 0)
2399 		return -EINVAL;
2400 
2401 	return do_tkill(0, pid, sig);
2402 }
2403 
2404 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2405 		siginfo_t __user *, uinfo)
2406 {
2407 	siginfo_t info;
2408 
2409 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2410 		return -EFAULT;
2411 
2412 	/* Not even root can pretend to send signals from the kernel.
2413 	   Nor can they impersonate a kill(), which adds source info.  */
2414 	if (info.si_code >= 0)
2415 		return -EPERM;
2416 	info.si_signo = sig;
2417 
2418 	/* POSIX.1b doesn't mention process groups.  */
2419 	return kill_proc_info(sig, &info, pid);
2420 }
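
/*
 * Illustrative userspace counterpart (assumes glibc sigqueue(), which
 * calls rt_sigqueueinfo with si_code = SI_QUEUE, a negative value, so
 * the -EPERM check above passes):
 */
#if 0
#include <sys/types.h>
#include <signal.h>

static int queue_value(pid_t pid, int val)
{
	union sigval sv = { .sival_int = val };

	return sigqueue(pid, SIGUSR1, sv);	/* handler sees si_value */
}
#endif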
2421 
2422 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2423 {
2424 	/* This is only valid for single tasks */
2425 	if (pid <= 0 || tgid <= 0)
2426 		return -EINVAL;
2427 
2428 	/* Not even root can pretend to send signals from the kernel.
2429 	   Nor can they impersonate a kill(), which adds source info.  */
2430 	if (info->si_code >= 0)
2431 		return -EPERM;
2432 	info->si_signo = sig;
2433 
2434 	return do_send_specific(tgid, pid, sig, info);
2435 }
2436 
2437 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2438 		siginfo_t __user *, uinfo)
2439 {
2440 	siginfo_t info;
2441 
2442 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2443 		return -EFAULT;
2444 
2445 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2446 }
2447 
2448 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2449 {
2450 	struct task_struct *t = current;
2451 	struct k_sigaction *k;
2452 	sigset_t mask;
2453 
2454 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2455 		return -EINVAL;
2456 
2457 	k = &t->sighand->action[sig-1];
2458 
2459 	spin_lock_irq(&current->sighand->siglock);
2460 	if (oact)
2461 		*oact = *k;
2462 
2463 	if (act) {
2464 		sigdelsetmask(&act->sa.sa_mask,
2465 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2466 		*k = *act;
2467 		/*
2468 		 * POSIX 3.3.1.3:
2469 		 *  "Setting a signal action to SIG_IGN for a signal that is
2470 		 *   pending shall cause the pending signal to be discarded,
2471 		 *   whether or not it is blocked."
2472 		 *
2473 		 *  "Setting a signal action to SIG_DFL for a signal that is
2474 		 *   pending and whose default action is to ignore the signal
2475 		 *   (for example, SIGCHLD), shall cause the pending signal to
2476 		 *   be discarded, whether or not it is blocked"
2477 		 */
2478 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2479 			sigemptyset(&mask);
2480 			sigaddset(&mask, sig);
2481 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2482 			do {
2483 				rm_from_queue_full(&mask, &t->pending);
2484 				t = next_thread(t);
2485 			} while (t != current);
2486 		}
2487 	}
2488 
2489 	spin_unlock_irq(&current->sighand->siglock);
2490 	return 0;
2491 }
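
/*
 * Illustrative userspace counterpart (assumes glibc sigaction()): the
 * sig_kernel_only() test above is why installing a handler for SIGKILL
 * or SIGSTOP is rejected with EINVAL.
 */
#if 0
#include <signal.h>

static void on_term(int sig)
{
	/* async-signal-safe work only */
}

static int install_handler(void)
{
	struct sigaction sa;

	sa.sa_handler = on_term;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	return sigaction(SIGTERM, &sa, NULL);	/* 0 on success */
}
#endif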
2492 
2493 int
2494 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2495 {
2496 	stack_t oss;
2497 	int error;
2498 
2499 	oss.ss_sp = (void __user *) current->sas_ss_sp;
2500 	oss.ss_size = current->sas_ss_size;
2501 	oss.ss_flags = sas_ss_flags(sp);
2502 
2503 	if (uss) {
2504 		void __user *ss_sp;
2505 		size_t ss_size;
2506 		int ss_flags;
2507 
2508 		error = -EFAULT;
2509 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2510 			goto out;
2511 		error = __get_user(ss_sp, &uss->ss_sp) |
2512 			__get_user(ss_flags, &uss->ss_flags) |
2513 			__get_user(ss_size, &uss->ss_size);
2514 		if (error)
2515 			goto out;
2516 
2517 		error = -EPERM;
2518 		if (on_sig_stack(sp))
2519 			goto out;
2520 
2521 		error = -EINVAL;
2522 		/*
2523 		 * Note - this code used to test ss_flags incorrectly:
2524 		 * old code may have been written using ss_flags == 0
2525 		 * to mean ss_flags == SS_ONSTACK (as this was the only
2526 		 * way that worked), so this check preserves that older
2527 		 * mechanism.
2528 		 */
2530 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2531 			goto out;
2532 
2533 		if (ss_flags == SS_DISABLE) {
2534 			ss_size = 0;
2535 			ss_sp = NULL;
2536 		} else {
2537 			error = -ENOMEM;
2538 			if (ss_size < MINSIGSTKSZ)
2539 				goto out;
2540 		}
2541 
2542 		current->sas_ss_sp = (unsigned long) ss_sp;
2543 		current->sas_ss_size = ss_size;
2544 	}
2545 
2546 	error = 0;
2547 	if (uoss) {
2548 		error = -EFAULT;
2549 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2550 			goto out;
2551 		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2552 			__put_user(oss.ss_size, &uoss->ss_size) |
2553 			__put_user(oss.ss_flags, &uoss->ss_flags);
2554 	}
2555 
2556 out:
2557 	return error;
2558 }
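
/*
 * Illustrative userspace counterpart (assumes glibc sigaltstack()):
 * setting up an alternate stack so an SA_ONSTACK handler can run even
 * after the main stack overflows; ss_flags == 0 is accepted, as the
 * compatibility note above explains.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static int setup_altstack(void)
{
	stack_t ss;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	return ss.ss_sp ? sigaltstack(&ss, NULL) : -1;
}
#endif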
2559 
2560 #ifdef __ARCH_WANT_SYS_SIGPENDING
2561 
2562 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2563 {
2564 	return do_sigpending(set, sizeof(*set));
2565 }
2566 
2567 #endif
2568 
2569 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2570 /* Some platforms have their own version with special arguments; others
2571    support only sys_rt_sigprocmask.  */
2572 
2573 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2574 		old_sigset_t __user *, oset)
2575 {
2576 	int error;
2577 	old_sigset_t old_set, new_set;
2578 
2579 	if (set) {
2580 		error = -EFAULT;
2581 		if (copy_from_user(&new_set, set, sizeof(*set)))
2582 			goto out;
2583 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2584 
2585 		spin_lock_irq(&current->sighand->siglock);
2586 		old_set = current->blocked.sig[0];
2587 
2588 		error = 0;
2589 		switch (how) {
2590 		default:
2591 			error = -EINVAL;
2592 			break;
2593 		case SIG_BLOCK:
2594 			sigaddsetmask(&current->blocked, new_set);
2595 			break;
2596 		case SIG_UNBLOCK:
2597 			sigdelsetmask(&current->blocked, new_set);
2598 			break;
2599 		case SIG_SETMASK:
2600 			current->blocked.sig[0] = new_set;
2601 			break;
2602 		}
2603 
2604 		recalc_sigpending();
2605 		spin_unlock_irq(&current->sighand->siglock);
2606 		if (error)
2607 			goto out;
2608 		if (oset)
2609 			goto set_old;
2610 	} else if (oset) {
2611 		old_set = current->blocked.sig[0];
2612 	set_old:
2613 		error = -EFAULT;
2614 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2615 			goto out;
2616 	}
2617 	error = 0;
2618 out:
2619 	return error;
2620 }
2621 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2622 
2623 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2624 SYSCALL_DEFINE4(rt_sigaction, int, sig,
2625 		const struct sigaction __user *, act,
2626 		struct sigaction __user *, oact,
2627 		size_t, sigsetsize)
2628 {
2629 	struct k_sigaction new_sa, old_sa;
2630 	int ret = -EINVAL;
2631 
2632 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2633 	if (sigsetsize != sizeof(sigset_t))
2634 		goto out;
2635 
2636 	if (act) {
2637 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2638 			return -EFAULT;
2639 	}
2640 
2641 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2642 
2643 	if (!ret && oact) {
2644 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2645 			return -EFAULT;
2646 	}
2647 out:
2648 	return ret;
2649 }
2650 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2651 
2652 #ifdef __ARCH_WANT_SYS_SGETMASK
2653 
2654 /*
2655  * For backwards compatibility.  Functionality superseded by sigprocmask.
2656  */
2657 SYSCALL_DEFINE0(sgetmask)
2658 {
2659 	/* SMP safe */
2660 	return current->blocked.sig[0];
2661 }
2662 
2663 SYSCALL_DEFINE1(ssetmask, int, newmask)
2664 {
2665 	int old;
2666 
2667 	spin_lock_irq(&current->sighand->siglock);
2668 	old = current->blocked.sig[0];
2669 
2670 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2671 						  sigmask(SIGSTOP)));
2672 	recalc_sigpending();
2673 	spin_unlock_irq(&current->sighand->siglock);
2674 
2675 	return old;
2676 }
2677 #endif /* __ARCH_WANT_SYS_SGETMASK */
2678 
2679 #ifdef __ARCH_WANT_SYS_SIGNAL
2680 /*
2681  * For backwards compatibility.  Functionality superseded by sigaction.
2682  */
2683 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2684 {
2685 	struct k_sigaction new_sa, old_sa;
2686 	int ret;
2687 
2688 	new_sa.sa.sa_handler = handler;
2689 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2690 	sigemptyset(&new_sa.sa.sa_mask);
2691 
2692 	ret = do_sigaction(sig, &new_sa, &old_sa);
2693 
2694 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2695 }
2696 #endif /* __ARCH_WANT_SYS_SIGNAL */
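
/*
 * Because SA_ONESHOT is set above, old-style signal() handlers are
 * reset to SIG_DFL on delivery. A hypothetical userspace sketch of the
 * classic re-arming idiom this forced:
 */
#if 0
#include <signal.h>

static void handler(int sig)
{
	signal(sig, handler);	/* re-install before the next delivery */
	/* ... handle the signal ... */
}
#endif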
2697 
2698 #ifdef __ARCH_WANT_SYS_PAUSE
2699 
2700 SYSCALL_DEFINE0(pause)
2701 {
2702 	current->state = TASK_INTERRUPTIBLE;
2703 	schedule();
2704 	return -ERESTARTNOHAND;
2705 }
2706 
2707 #endif
2708 
2709 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2710 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2711 {
2712 	sigset_t newset;
2713 
2714 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2715 	if (sigsetsize != sizeof(sigset_t))
2716 		return -EINVAL;
2717 
2718 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2719 		return -EFAULT;
2720 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2721 
2722 	spin_lock_irq(&current->sighand->siglock);
2723 	current->saved_sigmask = current->blocked;
2724 	current->blocked = newset;
2725 	recalc_sigpending();
2726 	spin_unlock_irq(&current->sighand->siglock);
2727 
2728 	current->state = TASK_INTERRUPTIBLE;
2729 	schedule();
2730 	set_restore_sigmask();
2731 	return -ERESTARTNOHAND;
2732 }
2733 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
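
/*
 * Illustrative userspace counterpart (assumes glibc sigsuspend(), plus
 * a hypothetical handler that sets got_usr1): the race-free
 * "block, test, atomically unblock and sleep" pattern this syscall
 * exists to support.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void wait_for_usr1(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);
	while (!got_usr1)
		sigsuspend(&old);	/* unblock + sleep atomically */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif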
2734 
2735 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2736 {
2737 	return NULL;
2738 }
2739 
2740 void __init signals_init(void)
2741 {
2742 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2743 }
2744 
2745 #ifdef CONFIG_KGDB_KDB
2746 #include <linux/kdb.h>
2747 /*
2748  * kdb_send_sig_info - Allows kdb to send signals without exposing
2749  * signal internals.  This function checks if the required locks are
2750  * available before calling the main signal code, to avoid kdb
2751  * deadlocks.
2752  */
2753 void
2754 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
2755 {
2756 	static struct task_struct *kdb_prev_t;
2757 	int sig, new_t;
2758 	if (!spin_trylock(&t->sighand->siglock)) {
2759 		kdb_printf("Can't do kill command now.\n"
2760 			   "The sigmask lock is held somewhere else in "
2761 			   "the kernel; try again later.\n");
2762 		return;
2763 	}
2764 	spin_unlock(&t->sighand->siglock);
2765 	new_t = kdb_prev_t != t;
2766 	kdb_prev_t = t;
2767 	if (t->state != TASK_RUNNING && new_t) {
2768 		kdb_printf("Process is not RUNNING; sending a signal from "
2769 			   "kdb risks deadlocking "
2770 			   "on the run queue locks.\n"
2771 			   "The signal has _not_ been sent.\n"
2772 			   "Reissue the kill command if you want to risk "
2773 			   "the deadlock.\n");
2774 		return;
2775 	}
2776 	sig = info->si_signo;
2777 	if (send_sig_info(sig, info, t))
2778 		kdb_printf("Failed to deliver signal %d to process %d.\n",
2779 			   sig, t->pid);
2780 	else
2781 		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
2782 }
2783 #endif	/* CONFIG_KGDB_KDB */
2784